Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/agere/et131x.c36
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c6
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c32
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c31
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h76
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c267
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h84
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.h27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.c210
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.h21
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c55
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.c233
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c142
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c401
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c13
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c24
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c75
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h53
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c39
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c9
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c79
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig9
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c9
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/Makefile12
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h218
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c388
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.c306
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c508
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h110
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c703
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c268
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h206
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c605
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h94
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c438
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h188
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c113
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h10914
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c12
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c26
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c39
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c105
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c57
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c11
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c28
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c44
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c41
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c36
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h22
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c110
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/fec.h15
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c216
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c42
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c24
-rw-r--r--drivers/net/ethernet/google/Kconfig1
-rw-r--r--drivers/net/ethernet/google/gve/Makefile4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h83
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c101
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h30
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c25
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c34
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c429
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c139
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c201
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c384
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c57
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c38
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c1048
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c107
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1367
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c80
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c27
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c23
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c220
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h21
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c27
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h8
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c77
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h155
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c730
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c165
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c293
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c45
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h47
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h38
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c79
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c62
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h83
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c110
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c80
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h42
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c35
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c10
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c6
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h297
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c717
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c1424
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c112
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c78
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c100
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c313
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h177
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c582
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.c626
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c132
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c24
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h170
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c37
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.h18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c49
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c338
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_idc.c503
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c139
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c33
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_mem.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c136
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c51
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c45
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c315
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h9
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c55
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h278
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c41
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h27
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c11
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h57
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c48
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c110
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c118
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c276
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c239
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c53
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h226
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c150
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c2
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile8
-rw-r--r--drivers/net/ethernet/intel/libeth/priv.h37
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c42
-rw-r--r--drivers/net/ethernet/intel/libeth/tx.c41
-rw-r--r--drivers/net/ethernet/intel/libeth/xdp.c451
-rw-r--r--drivers/net/ethernet/intel/libeth/xsk.c271
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig6
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile4
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c52
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c424
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h40
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c243
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c98
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c184
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c252
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c44
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c232
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c177
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c44
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c7
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c6
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c229
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h18
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c188
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c362
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c152
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c1039
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c529
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c166
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h161
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c29
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c239
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c230
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h52
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h19
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c169
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h27
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h16
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c21
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c126
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c31
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c548
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c14
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c328
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c82
-rw-r--r--drivers/net/ethernet/neterion/s2io.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c34
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h1
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c39
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/sfc/ef10.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c99
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c55
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c77
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h12
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c6
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c712
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c50
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c31
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c158
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h80
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c167
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h19
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h3
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig35
-rw-r--r--drivers/net/ethernet/wangxun/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c23
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c58
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c243
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h16
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.c599
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h127
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c414
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c280
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h14
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c4
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h2
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c261
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c8
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c22
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h4
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c314
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h26
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
609 files changed, 28314 insertions, 21816 deletions
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 678eddb36172..5c8217638dda 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb),
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb_headlen(skb) / 2,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ goto unmap_first_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
0,
desc[frag].len_vlan,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, dma_addr))
+ goto unmap_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
&adapter->regs->global.watchdog_timer);
}
return 0;
+
+unmap_out:
+ // Unmap the body of the packet with map_page
+ while (--i) {
+ frag--;
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_page(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+unmap_first_out:
+ // Unmap the header with map_single
+ while (frag--) {
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_single(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
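For reference, the fix above follows the standard streaming-DMA contract: every dma_map_single()/dma_map_page() result must be checked with dma_mapping_error(), and a failure part-way through requires unmapping the already-mapped fragments in reverse order before bailing out. A minimal sketch of that pattern (the helper and its names are illustrative, not the driver's actual code):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: map the linear part of an skb plus its first page
 * fragment, unwinding the first mapping if the second one fails.
 */
static int example_map_two(struct device *dev, struct sk_buff *skb,
			   dma_addr_t *head, dma_addr_t *frag)
{
	const skb_frag_t *f = &skb_shinfo(skb)->frags[0];

	*head = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *head))
		return -ENOMEM;

	*frag = skb_frag_dma_map(dev, f, 0, skb_frag_size(f), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *frag)) {
		/* undo the first mapping before giving up */
		dma_unmap_single(dev, *head, skb_headlen(skb), DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}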
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 06dea3a13e77..e6b802e3d844 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -551,9 +551,7 @@ static int airoha_fe_init(struct airoha_eth *eth)
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
- enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
struct airoha_qdma *qdma = q->qdma;
- struct airoha_eth *eth = qdma->eth;
int qid = q - &qdma->q_rx[0];
int nframes = 0;
@@ -577,9 +575,6 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
e->dma_addr = page_pool_get_dma_addr(page) + offset;
e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
- dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
- dir);
-
val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
@@ -2984,6 +2979,7 @@ static int airoha_probe(struct platform_device *pdev)
error_napi_stop:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_stop_napi(&eth->qdma[i]);
+ airoha_ppe_deinit(eth);
error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_hw_cleanup(&eth->qdma[i]);
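The dma_sync_single_for_device() call removed from airoha_qdma_fill_rx_queue() above is redundant when the rx page_pool is created with PP_FLAG_DMA_SYNC_DEV, because the pool then syncs each buffer for the device as it hands it out. Whether that is exactly how this driver configures its pool is an assumption here; a generic sketch of such a pool setup (parameter values are illustrative):

#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>

/* Sketch: a page_pool that maps pages and syncs them for the device itself,
 * so the refill path does not need an explicit dma_sync_single_for_device().
 */
static struct page_pool *example_create_rx_pool(struct device *dev,
						unsigned int ring_size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
	};

	return page_pool_create(&pp);
}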
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 0e5b8c21b9aa..9ab964c536e1 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -161,7 +161,7 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
}
static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
- struct reserved_mem *rmem)
+ struct resource *res)
{
const struct firmware *fw;
void __iomem *addr;
@@ -178,9 +178,9 @@ static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
goto out;
}
- addr = devm_ioremap(dev, rmem->base, rmem->size);
- if (!addr) {
- ret = -ENOMEM;
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto out;
}
@@ -401,12 +401,13 @@ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
return ERR_PTR(-ENODEV);
pdev = of_find_device_by_node(np);
- of_node_put(np);
if (!pdev) {
dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
return ERR_PTR(-ENODEV);
}
+ of_node_put(np);
if (!try_module_get(THIS_MODULE)) {
dev_err(dev, "failed to get the device driver module\n");
@@ -474,9 +475,8 @@ static const struct regmap_config regmap_config = {
static int airoha_npu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct reserved_mem *rmem;
struct airoha_npu *npu;
- struct device_node *np;
+ struct resource res;
void __iomem *base;
int i, irq, err;
@@ -498,15 +498,9 @@ static int airoha_npu_probe(struct platform_device *pdev)
if (IS_ERR(npu->regmap))
return PTR_ERR(npu->regmap);
- np = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
-
- if (!rmem)
- return -ENODEV;
+ err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -539,12 +533,12 @@ static int airoha_npu_probe(struct platform_device *pdev)
if (err)
return err;
- err = airoha_npu_run_firmware(dev, base, rmem);
+ err = airoha_npu_run_firmware(dev, base, &res);
if (err)
return dev_err_probe(dev, err, "failed to run npu firmware\n");
regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
- rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ res.start + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
@@ -552,7 +546,7 @@ static int airoha_npu_probe(struct platform_device *pdev)
/* setting booting address */
for (i = 0; i < NPU_NUM_CORES; i++)
- regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), res.start);
usleep_range(1000, 2000);
/* enable NPU cores */
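The airoha_npu.c change above swaps the open-coded of_parse_phandle() + of_reserved_mem_lookup() sequence for of_reserved_mem_region_to_resource(), which resolves a "memory-region" phandle straight into a struct resource that devm_ioremap_resource() can consume and error-check. A condensed sketch of the same pattern (hypothetical helper, assuming index 0 of "memory-region"):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_reserved_mem.h>

static void __iomem *example_map_reserved_region(struct device *dev)
{
	struct resource res;
	int err;

	/* Resolve the first "memory-region" phandle into a resource. */
	err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
	if (err)
		return ERR_PTR(err);

	/* devm_ioremap_resource() returns an ERR_PTR() on failure. */
	return devm_ioremap_resource(dev, &res);
}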
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 0e217acfc5ef..c354d536bc66 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -232,6 +232,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
AIROHA_FOE_IB1_BIND_TTL;
hwe->ib1 = val;
@@ -281,33 +282,42 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
hwe->ipv6.data = qdata;
hwe->ipv6.ib2 = val;
l2 = &hwe->ipv6.l2;
+ l2->etype = ETH_P_IPV6;
} else {
hwe->ipv4.data = qdata;
hwe->ipv4.ib2 = val;
l2 = &hwe->ipv4.l2.common;
+ l2->etype = ETH_P_IP;
}
l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ struct airoha_foe_mac_info *mac_info;
+
l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
hwe->ipv4.l2.src_mac_lo =
get_unaligned_be16(data->eth.h_source + 4);
+
+ mac_info = (struct airoha_foe_mac_info *)l2;
+ mac_info->pppoe_id = data->pppoe.sid;
} else {
- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+ data->pppoe.sid);
}
if (data->vlan.num) {
- l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
l2->vlan1 = data->vlan.hdr[0].id;
if (data->vlan.num == 2)
l2->vlan2 = data->vlan.hdr[1].id;
- } else if (dsa_port >= 0) {
- l2->etype = BIT(15) | BIT(dsa_port);
- } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
- l2->etype = ETH_P_IPV6;
- } else {
- l2->etype = ETH_P_IP;
+ }
+
+ if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+ l2->etype = ETH_P_PPP_SES;
}
return 0;
@@ -959,6 +969,11 @@ static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1 || data.vlan.num == 2)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
break;
default:
return -EOPNOTSUPP;
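The FOE ib1 word built earlier in this file's diff is packed with FIELD_PREP() from <linux/bitfield.h>: each field is described by a GENMASK()/BIT() constant and FIELD_PREP() shifts the value into position. A generic illustration with made-up masks (these are not the real FOE bit layout):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_IB1_STATE		GENMASK(31, 28)	/* illustrative masks only */
#define EX_IB1_VLAN_LAYER	GENMASK(10, 8)
#define EX_IB1_BIND_PPPOE	BIT(7)

static u32 example_pack_ib1(u32 state, u32 nr_vlan, bool pppoe)
{
	return FIELD_PREP(EX_IB1_STATE, state) |
	       FIELD_PREP(EX_IB1_VLAN_LAYER, nr_vlan) |
	       FIELD_PREP(EX_IB1_BIND_PPPOE, pppoe);
}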
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index c37fa393b99e..95dcc3969f0c 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -19,7 +19,9 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
depends on PCI_MSI && !CPU_BIG_ENDIAN
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
+ select NET_DEVLINK
help
This driver supports Elastic Network Adapter (ENA)"
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index 6ab615365172..6d8036bc1823 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o ena_phc.o ena_devlink.o ena_debugfs.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 9d9fa6559354..898ecd96b96a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -60,6 +60,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_AENQ_CONFIG = 26,
ENA_ADMIN_LINK_CONFIG = 27,
ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_PHC_CONFIG = 29,
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
@@ -127,6 +128,14 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+enum ena_admin_phc_type {
+ ENA_ADMIN_PHC_TYPE_READLESS = 0,
+};
+
+enum ena_admin_phc_error_flags {
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
+};
+
/* ENA SRD configuration for ENI */
enum ena_admin_ena_srd_flags {
/* Feature enabled */
@@ -943,7 +952,9 @@ struct ena_admin_host_info {
* 4 : rss_configurable_function_key
* 5 : reserved
* 6 : rx_page_reuse
- * 31:7 : reserved
+ * 7 : reserved
+ * 8 : phc
+ * 31:9 : reserved
*/
u32 driver_supported_features;
};
@@ -975,7 +986,7 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
-/* When hint value is 0, driver should use it's own predefined value */
+/* When hint value is 0, driver should use its own predefined value */
struct ena_admin_ena_hw_hints {
/* value in ms */
u16 mmio_read_timeout;
@@ -1023,6 +1034,43 @@ struct ena_admin_queue_ext_feature_desc {
};
};
+struct ena_admin_feature_phc_desc {
+ /* PHC type as defined in enum ena_admin_phc_type,
+ * used only for GET command.
+ */
+ u8 type;
+
+ /* Reserved - MBZ */
+ u8 reserved1[3];
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR,
+ * used only for GET command.
+ */
+ u32 doorbell_offset;
+
+ /* Max time for valid PHC retrieval, passing this threshold will
+ * fail the get-time request and block PHC requests for
+ * block_timeout_usec, used only for GET command.
+ */
+ u32 expire_timeout_usec;
+
+ /* PHC requests block period, blocking starts if PHC request expired
+ * in order to prevent floods on busy device,
+ * used only for GET command.
+ */
+ u32 block_timeout_usec;
+
+ /* Shared PHC physical address (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ struct ena_common_mem_addr output_address;
+
+ /* Shared PHC Size (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ u32 output_length;
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -1052,6 +1100,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1085,6 +1135,9 @@ struct ena_admin_set_feat_cmd {
/* LLQ configuration */
struct ena_admin_feature_llq_desc llq;
+
+ /* PHC configuration */
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1162,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
u32 reg_val;
};
+struct ena_admin_phc_resp {
+ /* Request Id, received from DB register */
+ u16 req_id;
+
+ u8 reserved1[6];
+
+ /* PHC timestamp (nsec) */
+ u64 timestamp;
+
+ u8 reserved2[12];
+
+ /* Bit field of enum ena_admin_phc_error_flags */
+ u32 error_flags;
+
+ u8 reserved3[32];
+};
+
/* aq_common_desc */
#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -1260,6 +1330,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* aenq_common_desc */
#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
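The new ENA_ADMIN_HOST_INFO_PHC_MASK bit above lets the driver advertise PHC support in the host-info driver_supported_features bitmap. How ena_netdev.c actually sets it is not shown in this excerpt, so the following is only a sketch of the obvious usage:

#include "ena_admin_defs.h"

static void example_host_info_advertise_phc(struct ena_admin_host_info *host_info,
					    bool phc_enabled)
{
	/* Assumption: the bit is only set when PHC was successfully enabled. */
	if (phc_enabled)
		host_info->driver_supported_features |=
			ENA_ADMIN_HOST_INFO_PHC_MASK;
}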
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 66445617fbfb..e67b592e5697 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,6 +41,12 @@
#define ENA_MAX_ADMIN_POLL_US 5000
+/* PHC definitions */
+#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
+#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
+#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
+#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP)
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -1641,6 +1647,267 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
+}
+
+int ena_com_phc_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+
+ memset(phc, 0x0, sizeof(*phc));
+
+ /* Allocate shared memory used for the PHC timestamp retrieved from the device */
+ phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ &phc->phys_addr,
+ GFP_KERNEL);
+ if (unlikely(!phc->virt_addr))
+ return -ENOMEM;
+
+ spin_lock_init(&phc->lock);
+
+ phc->virt_addr->req_id = 0;
+ phc->virt_addr->timestamp = 0;
+
+ return 0;
+}
+
+int ena_com_phc_config(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ struct ena_admin_get_feat_resp get_feat_resp;
+ struct ena_admin_set_feat_resp set_feat_resp;
+ struct ena_admin_set_feat_cmd set_feat_cmd;
+ int ret = 0;
+
+ /* Get device PHC default configuration */
+ ret = ena_com_get_feature(ena_dev,
+ &get_feat_resp,
+ ENA_ADMIN_PHC_CONFIG,
+ 0);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to get PHC feature configuration, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Supporting only readless PHC retrieval */
+ if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
+ netdev_err(ena_dev->net_device,
+ "Unsupported PHC type, error: %d\n",
+ -EOPNOTSUPP);
+ return -EOPNOTSUPP;
+ }
+
+ /* Update PHC doorbell offset according to device value,
+ * used to write req_id to PHC bar
+ */
+ phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
+
+ /* Update PHC expire timeout according to device
+ * or default driver value
+ */
+ phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
+ get_feat_resp.u.phc.expire_timeout_usec :
+ ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
+
+ /* Update PHC block timeout according to device
+ * or default driver value
+ */
+ phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
+ get_feat_resp.u.phc.block_timeout_usec :
+ ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
+
+ /* Sanity check - expire timeout must not exceed block timeout */
+ if (phc->expire_timeout_usec > phc->block_timeout_usec)
+ phc->expire_timeout_usec = phc->block_timeout_usec;
+
+ /* Prepare PHC feature command */
+ memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
+ set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
+ set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &set_feat_cmd.u.phc.output_address,
+ phc->phys_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed setting PHC output address, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Send PHC feature command to the device */
+ ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
+ (struct ena_admin_aq_entry *)&set_feat_cmd,
+ sizeof(set_feat_cmd),
+ (struct ena_admin_acq_entry *)&set_feat_resp,
+ sizeof(set_feat_resp));
+
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to enable PHC, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ phc->active = true;
+ netdev_dbg(ena_dev->net_device, "PHC is active in the device\n");
+
+ return ret;
+}
+
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ unsigned long flags = 0;
+
+ /* If PHC is not supported by the device, exit silently */
+ if (!phc->virt_addr)
+ return;
+
+ spin_lock_irqsave(&phc->lock, flags);
+ phc->active = false;
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ dma_free_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ phc->virt_addr,
+ phc->phys_addr);
+ phc->virt_addr = NULL;
+}
+
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
+{
+ volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
+ const ktime_t zero_system_time = ktime_set(0, 0);
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ ktime_t expire_time;
+ ktime_t block_time;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (!phc->active) {
+ netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&phc->lock, flags);
+
+ /* Check if PHC is in blocked state */
+ if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {
+ /* Check if blocking time expired */
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ if (!ktime_after(ktime_get(), block_time)) {
+ /* PHC is still in blocked state, skip PHC request */
+ phc->stats.phc_skp++;
+ ret = -EBUSY;
+ goto skip;
+ }
+
+ /* PHC is in active state, update statistics according
+ * to req_id and error_flags
+ */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* Device didn't update req_id during blocking time,
+ * this indicates a device error
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (device error)\n",
+ phc->req_id);
+ phc->stats.phc_err_dv++;
+ } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) {
+ /* Device updated req_id during blocking time but got
+ * a PHC error, this occurs if device:
+ * - exceeded the get time request limit
+ * - received an invalid timestamp
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (error 0x%x)\n",
+ phc->req_id,
+ resp->error_flags);
+ phc->stats.phc_err_ts += !!(resp->error_flags &
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP);
+ } else {
+ /* Device updated req_id during blocking time
+ * with valid timestamp
+ */
+ phc->stats.phc_exp++;
+ }
+ }
+
+ /* Setting relative timeouts */
+ phc->system_time = ktime_get();
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec);
+
+ /* We expect the device to return this req_id once
+ * the new PHC timestamp is updated
+ */
+ phc->req_id++;
+
+ /* Initialize PHC shared memory with different req_id value
+ * to be able to identify once the device changes it to req_id
+ */
+ resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
+
+ /* Writing req_id to PHC bar */
+ writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
+
+ /* Stalling until the device updates req_id */
+ while (1) {
+ if (unlikely(ktime_after(ktime_get(), expire_time))) {
+ /* Gave up waiting for updated req_id, PHC enters into
+ * blocked state until passing blocking time,
+ * during this time any get PHC timestamp will fail with
+ * device busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Check if req_id was updated by the device */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* req_id was not updated by the device yet,
+ * check again on next loop
+ */
+ continue;
+ }
+
+ /* req_id was updated by the device which indicates that
+ * PHC timestamp and error_flags are updated too,
+ * checking errors before retrieving timestamp
+ */
+ if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
+ /* Retrieved an invalid PHC timestamp; PHC enters a
+ * blocked state until the blocking time passes,
+ * and during this time any PHC get time requests
+ * will fail with a device busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* PHC timestamp value is returned to the caller */
+ *timestamp = resp->timestamp;
+
+ /* Update statistic on valid PHC timestamp retrieval */
+ phc->stats.phc_cnt++;
+
+ /* This indicates PHC state is active */
+ phc->system_time = zero_system_time;
+ break;
+ }
+
+skip:
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ return ret;
+}
+
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
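A minimal caller-side sketch of the new API (an illustration under assumptions, not code from this series): ena_com_phc_get_timestamp() hands back the device time already in nanoseconds, so a consumer only needs to convert it, and -EBUSY is the expected result while the PHC sits in its blocked state.

/* Sketch only: assumes ena_dev already went through ena_com_phc_init() and
 * ena_com_phc_config(); example_read_phc() is a hypothetical helper.
 */
static int example_read_phc(struct ena_com_dev *ena_dev, struct timespec64 *ts)
{
	u64 ns;
	int rc;

	/* Returns -EBUSY while the PHC is blocked after an expired request */
	rc = ena_com_phc_get_timestamp(ena_dev, &ns);
	if (rc)
		return rc;

	*ts = ns_to_timespec64(ns);
	return 0;
}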
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 9414e93d107b..64df2c48c9a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -210,6 +210,14 @@ struct ena_com_stats_admin {
u64 no_completion;
};
+struct ena_com_stats_phc {
+ u64 phc_cnt;
+ u64 phc_exp;
+ u64 phc_skp;
+ u64 phc_err_dv;
+ u64 phc_err_ts;
+};
+
struct ena_com_admin_queue {
void *q_dmadev;
struct ena_com_dev *ena_dev;
@@ -258,6 +266,47 @@ struct ena_com_mmio_read {
spinlock_t lock;
};
+/* PTP hardware clock (PHC) MMIO read data info */
+struct ena_com_phc_info {
+ /* Internal PHC statistics */
+ struct ena_com_stats_phc stats;
+
+ /* PHC shared memory - virtual address */
+ struct ena_admin_phc_resp *virt_addr;
+
+ /* System time of last PHC request */
+ ktime_t system_time;
+
+ /* Spin lock to ensure a single outstanding PHC read */
+ spinlock_t lock;
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 doorbell_offset;
+
+ /* Shared memory read expire timeout (usec)
+ * Max time for a valid PHC retrieval; exceeding this threshold fails
+ * the get time request and blocks new PHC requests for block_timeout_usec
+ * in order to prevent floods on a busy device
+ */
+ u32 expire_timeout_usec;
+
+ /* Shared memory read block timeout (usec)
+ * Period for which PHC requests are blocked; blocking starts once a PHC
+ * request expires, in order to prevent floods on a busy device,
+ * and any PHC requests during the block period are skipped
+ */
+ u32 block_timeout_usec;
+
+ /* PHC shared memory - physical address */
+ dma_addr_t phys_addr;
+
+ /* Request id sent to the device */
+ u16 req_id;
+
+ /* True if PHC is active in the device */
+ bool active;
+};
+
struct ena_rss {
/* Indirect table */
u16 *host_rss_ind_tbl;
@@ -317,6 +366,7 @@ struct ena_com_dev {
u32 ena_min_poll_delay_us;
struct ena_com_mmio_read mmio_read;
+ struct ena_com_phc_info phc;
struct ena_rss rss;
u32 supported_features;
@@ -382,6 +432,40 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+/* ena_com_phc_init - Allocate and initialize PHC feature
+ * @ena_dev: ENA communication layer struct
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_supported - Return whether the PHC feature is supported by the device
+ * @ena_dev: ENA communication layer struct
+ * @note: This method must be called after getting supported features
+ * @return - supported or not
+ */
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_config - Configure PHC feature
+ * @ena_dev: ENA communication layer struct
+ * Configure PHC feature in driver and device
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_config(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_destroy - Destroy PHC feature
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
+ * @ena_dev: ENA communication layer struct
+ * @timestamp: Retrieved PHC timestamp
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
+
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
new file mode 100644
index 000000000000..46ed80986724
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include "ena_debugfs.h"
+#include "ena_phc.h"
+
+static int phc_stats_show(struct seq_file *file, void *priv)
+{
+ struct ena_adapter *adapter = file->private;
+
+ if (!ena_phc_is_active(adapter))
+ return 0;
+
+ seq_printf(file,
+ "phc_cnt: %llu\n",
+ adapter->ena_dev->phc.stats.phc_cnt);
+ seq_printf(file,
+ "phc_exp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_exp);
+ seq_printf(file,
+ "phc_skp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_skp);
+ seq_printf(file,
+ "phc_err_dv: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_dv);
+ seq_printf(file,
+ "phc_err_ts: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_ts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(phc_stats);
+
+void ena_debugfs_init(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ adapter->debugfs_base =
+ debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL);
+
+ debugfs_create_file("phc_stats",
+ 0400,
+ adapter->debugfs_base,
+ adapter,
+ &phc_stats_fops);
+}
+
+void ena_debugfs_terminate(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ debugfs_remove_recursive(adapter->debugfs_base);
+}
+
+#endif /* CONFIG_DEBUG_FS */
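For reference, debugfs_create_dir() above uses the PCI device name, so once debugfs is mounted the counters are readable at a path of the form /sys/kernel/debug/<pci-bdf>/phc_stats (the exact BDF depends on the system); the file produces no output unless the PHC is active.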
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
new file mode 100644
index 000000000000..dc61dd998867
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifndef __ENA_DEBUGFS_H__
+#define __ENA_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include "ena_netdev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void ena_debugfs_init(struct net_device *dev);
+
+void ena_debugfs_terminate(struct net_device *dev);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ena_debugfs_init(struct net_device *dev) {}
+
+static inline void ena_debugfs_terminate(struct net_device *dev) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __ENA_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
new file mode 100644
index 000000000000..ac81c24016dd
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_devlink.h"
+#include "ena_phc.h"
+
+static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (!val.vbool)
+ return 0;
+
+ if (!ena_com_phc_supported(adapter->ena_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ena_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_PHC,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL,
+ NULL,
+ ena_devlink_enable_phc_validate),
+};
+
+void ena_devlink_params_get(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ &val);
+ if (err) {
+ netdev_err(adapter->netdev, "Failed to query PHC param\n");
+ return;
+ }
+
+ ena_phc_enable(adapter, val.vbool);
+}
+
+void ena_devlink_disable_phc_param(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = false;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+}
+
+static void ena_devlink_port_register(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ struct devlink_port_attrs attrs = {};
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(&adapter->devlink_port, &attrs);
+ devl_port_register(devlink, &adapter->devlink_port, 0);
+}
+
+static void ena_devlink_port_unregister(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ devl_port_unregister(&adapter->devlink_port);
+}
+
+static int ena_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ ena_devlink_port_unregister(devlink);
+
+ rtnl_lock();
+ ena_destroy_device(adapter, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ena_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ int err = 0;
+
+ rtnl_lock();
+ /* Check that no other routine initialized the device (e.g.
+ * ena_fw_reset_device()). Also we're under devlink_mutex here,
+ * so devlink isn't freed under our feet.
+ */
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ err = ena_restore_device(adapter);
+
+ rtnl_unlock();
+
+ ena_devlink_port_register(devlink);
+
+ if (!err)
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return err;
+}
+
+static const struct devlink_ops ena_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = ena_devlink_reload_down,
+ .reload_up = ena_devlink_reload_up,
+};
+
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value value;
+ int rc;
+
+ rc = devlink_params_register(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+ if (rc) {
+ netdev_err(adapter->netdev, "Failed to register devlink params\n");
+ return rc;
+ }
+
+ value.vbool = ena_phc_is_enabled(adapter);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+
+ return 0;
+}
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ena_devlink_ops,
+ sizeof(struct ena_adapter *),
+ dev);
+ if (!devlink) {
+ netdev_err(adapter->netdev,
+ "Failed to allocate devlink struct\n");
+ return NULL;
+ }
+
+ ENA_DEVLINK_PRIV(devlink) = adapter;
+ adapter->devlink = devlink;
+
+ if (ena_devlink_configure_params(devlink))
+ goto free_devlink;
+
+ return devlink;
+
+free_devlink:
+ devlink_free(devlink);
+ return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+}
+
+void ena_devlink_free(struct devlink *devlink)
+{
+ ena_devlink_configure_params_clean(devlink);
+
+ devlink_free(devlink);
+}
+
+void ena_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ devl_lock(devlink);
+ ena_devlink_port_register(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+
+void ena_devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ ena_devlink_port_unregister(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
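Because the parameter uses DEVLINK_PARAM_CMODE_DRIVERINIT, toggling it only takes effect across a devlink reload. Assuming the generic parameter is exposed to userspace under the name enable_phc, the expected sequence is roughly:

devlink dev param set pci/0000:00:05.0 name enable_phc value true cmode driverinit
devlink dev reload pci/0000:00:05.0

(the PCI address is only an example); ena_devlink_params_get() then picks the value up when ena_device_init() runs again as part of the reload.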
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h
new file mode 100644
index 000000000000..7a19ce4830d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+#ifndef DEVLINK_H
+#define DEVLINK_H
+
+#include "ena_netdev.h"
+#include <net/devlink.h>
+
+#define ENA_DEVLINK_PRIV(devlink) \
+ (*(struct ena_adapter **)devlink_priv(devlink))
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
+void ena_devlink_free(struct devlink *devlink);
+void ena_devlink_register(struct devlink *devlink, struct device *dev);
+void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_phc_param(struct devlink *devlink);
+
+#endif /* DEVLINK_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a3c934c3de71..a81d3a7a3bb9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -5,9 +5,11 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "ena_netdev.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -298,6 +300,18 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
ena_get_stats(adapter, data, true);
}
+static int ena_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = ena_phc_get_index(adapter);
+
+ return 0;
+}
+
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
{
return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
@@ -721,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
return data;
}
-static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
int rc;
@@ -772,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
@@ -816,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = ena_set_rss_hash(adapter->ena_dev, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -847,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
info->data = adapter->num_io_queues;
rc = 0;
break;
- case ETHTOOL_GRXFH:
- rc = ena_get_rss_hash(adapter->ena_dev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
@@ -1098,16 +1094,17 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
- .set_rxnfc = ena_set_rxnfc,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
+ .get_rxfh_fields = ena_get_rxfh_fields,
+ .set_rxfh_fields = ena_set_rxfh_fields,
.get_channels = ena_get_channels,
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ena_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 86fd08f375df..92d149d4f091 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -19,6 +19,12 @@
#include "ena_pci_id_tbl.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
+
+#include "ena_devlink.h"
+
+#include "ena_debugfs.h"
+
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
@@ -39,8 +45,6 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
-static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -2743,7 +2747,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
- ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK |
+ ENA_ADMIN_HOST_INFO_PHC_MASK;
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -3135,6 +3140,8 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
goto err_mmio_read_less;
}
+ ena_devlink_params_get(adapter->devlink);
+
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
@@ -3188,6 +3195,10 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
if (unlikely(rc))
goto err_admin_init;
+ rc = ena_phc_init(adapter);
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
+ netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc);
+
return 0;
err_admin_init:
@@ -3233,7 +3244,7 @@ err_disable_msix:
return rc;
}
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3271,6 +3282,8 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_admin_destroy(ena_dev);
+ ena_phc_destroy(adapter);
+
ena_com_mmio_reg_read_request_destroy(ena_dev);
/* return reset reason to default value */
@@ -3282,7 +3295,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
return rc;
}
-static int ena_restore_device(struct ena_adapter *adapter)
+int ena_restore_device(struct ena_adapter *adapter)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3344,6 +3357,7 @@ err_device_destroy:
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_phc_destroy(adapter);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -3867,6 +3881,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
+ struct devlink *devlink;
u32 max_num_io_queues;
bool wd_state;
int bars, rc;
@@ -3932,10 +3947,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_phc_alloc(adapter);
+ if (rc) {
+ netdev_err(netdev, "ena_phc_alloc failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
if (rc) {
netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
- goto err_netdev_destroy;
+ goto err_free_phc;
}
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
@@ -3944,12 +3965,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_metrics_destroy;
}
+ /* Need to do this before ena_device_init */
+ devlink = ena_devlink_alloc(adapter);
+ if (!devlink) {
+ netdev_err(netdev, "ena_devlink_alloc failed\n");
+ rc = -ENOMEM;
+ goto err_metrics_destroy;
+ }
+
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_metrics_destroy;
+ goto ena_devlink_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4033,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
+ ena_debugfs_init(netdev);
+
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -4054,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapters_found++;
+ /* From this point, the devlink device is visible to users.
+ * Perform the registration last to ensure that all the resources
+ * are available and that the netdevice is registered.
+ */
+ ena_devlink_register(devlink, &pdev->dev);
+
return 0;
err_rss:
@@ -4070,8 +4107,12 @@ err_worker_destroy:
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+ena_devlink_destroy:
+ ena_devlink_free(devlink);
err_metrics_destroy:
ena_com_delete_customer_metrics_buffer(ena_dev);
+err_free_phc:
+ ena_phc_free(adapter);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4102,6 +4143,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
+ ena_debugfs_terminate(netdev);
+
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
@@ -4112,6 +4155,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+ ena_phc_free(adapter);
+
+ ena_devlink_unregister(adapter->devlink);
+ ena_devlink_free(adapter->devlink);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6e12ae3b12e5..006f9a3acea6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -16,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+#include <net/devlink.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -110,6 +111,8 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+struct ena_phc_info;
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -348,6 +351,8 @@ struct ena_adapter {
char name[ENA_NAME_MAX_LEN];
+ struct ena_phc_info *phc_info;
+
unsigned long flags;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
@@ -383,6 +388,13 @@ struct ena_adapter {
struct bpf_prog *xdp_bpf_prog;
u32 xdp_first_ring;
u32 xdp_num_queues;
+
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+#ifdef CONFIG_DEBUG_FS
+
+ struct dentry *debugfs_base;
+#endif /* CONFIG_DEBUG_FS */
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -412,6 +424,8 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+int ena_restore_device(struct ena_adapter *adapter);
int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
struct ena_tx_buffer *tx_info, bool is_xdp);
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c
new file mode 100644
index 000000000000..7867e893fd15
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_netdev.h"
+#include "ena_phc.h"
+#include "ena_devlink.h"
+
+static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_feature_enable(struct ptp_clock_info *clock_info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ena_phc_info *phc_info =
+ container_of(clock_info, struct ena_phc_info, clock_info);
+ unsigned long flags;
+ u64 timestamp_nsec;
+ int rc;
+
+ spin_lock_irqsave(&phc_info->lock, flags);
+
+ ptp_read_system_prets(sts);
+
+ rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev,
+ &timestamp_nsec);
+
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&phc_info->lock, flags);
+
+ *ts = ns_to_timespec64(timestamp_nsec);
+
+ return rc;
+}
+
+static int ena_phc_settime64(struct ptp_clock_info *clock_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ena_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjtime = ena_phc_adjtime,
+ .adjfine = ena_phc_adjfine,
+ .gettimex64 = ena_phc_gettimex64,
+ .settime64 = ena_phc_settime64,
+ .enable = ena_phc_feature_enable,
+};
+
+/* Enable/Disable PHC by the kernel; takes effect on the next init flow */
+void ena_phc_enable(struct ena_adapter *adapter, bool enable)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ if (!phc_info) {
+ netdev_err(adapter->netdev, "phc_info is not allocated\n");
+ return;
+ }
+
+ phc_info->enabled = enable;
+}
+
+/* Check if PHC is enabled by the kernel */
+bool ena_phc_is_enabled(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->enabled);
+}
+
+/* PHC is activated if ptp clock is registered in the kernel */
+bool ena_phc_is_active(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->clock);
+}
+
+static int ena_phc_register(struct ena_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ptp_clock_info *clock_info;
+ struct ena_phc_info *phc_info;
+ int rc = 0;
+
+ phc_info = adapter->phc_info;
+ clock_info = &phc_info->clock_info;
+
+ /* PHC may already be registered in case of a reset */
+ if (ena_phc_is_active(adapter))
+ return 0;
+
+ phc_info->adapter = adapter;
+
+ spin_lock_init(&phc_info->lock);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ *clock_info = ena_ptp_clock_info;
+ snprintf(clock_info->name,
+ sizeof(clock_info->name),
+ "ena-ptp-%02x",
+ PCI_SLOT(pdev->devfn));
+
+ phc_info->clock = ptp_clock_register(clock_info, &pdev->dev);
+ if (IS_ERR(phc_info->clock)) {
+ rc = PTR_ERR(phc_info->clock);
+ netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n",
+ rc);
+ phc_info->clock = NULL;
+ }
+
+ return rc;
+}
+
+static void ena_phc_unregister(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ /* During reset flow, PHC must stay registered
+ * to keep kernel's PHC index
+ */
+ if (ena_phc_is_active(adapter) &&
+ !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ ptp_clock_unregister(phc_info->clock);
+ phc_info->clock = NULL;
+ }
+}
+
+int ena_phc_alloc(struct ena_adapter *adapter)
+{
+ /* Allocate driver specific PHC info */
+ adapter->phc_info = vzalloc(sizeof(*adapter->phc_info));
+ if (unlikely(!adapter->phc_info)) {
+ netdev_err(adapter->netdev, "Failed to alloc phc_info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ena_phc_free(struct ena_adapter *adapter)
+{
+ if (adapter->phc_info) {
+ vfree(adapter->phc_info);
+ adapter->phc_info = NULL;
+ }
+}
+
+int ena_phc_init(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
+ int rc = -EOPNOTSUPP;
+
+ /* Validate PHC feature is supported in the device */
+ if (!ena_com_phc_supported(ena_dev)) {
+ netdev_dbg(netdev, "PHC feature is not supported by the device\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Validate PHC feature is enabled by the kernel */
+ if (!ena_phc_is_enabled(adapter)) {
+ netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Initialize device specific PHC info */
+ rc = ena_com_phc_init(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to init phc, error: %d\n", rc);
+ goto err_ena_com_phc_init;
+ }
+
+ /* Configure PHC feature in driver and device */
+ rc = ena_com_phc_config(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to config phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ /* Register to PTP class driver */
+ rc = ena_phc_register(adapter);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to register phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ return 0;
+
+err_ena_com_phc_config:
+ ena_com_phc_destroy(ena_dev);
+err_ena_com_phc_init:
+ ena_phc_enable(adapter, false);
+ ena_devlink_disable_phc_param(adapter->devlink);
+ return rc;
+}
+
+void ena_phc_destroy(struct ena_adapter *adapter)
+{
+ ena_phc_unregister(adapter);
+ ena_com_phc_destroy(adapter->ena_dev);
+}
+
+int ena_phc_get_index(struct ena_adapter *adapter)
+{
+ if (ena_phc_is_active(adapter))
+ return ptp_clock_index(adapter->phc_info->clock);
+
+ return -1;
+}
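Once ena_phc_register() succeeds, the clock is visible as /dev/ptpN, where N is what ena_phc_get_index() reports (and what ethtool -T shows via ena_get_ts_info()). A small userspace sketch, with the device path as an assumed example, reads it through the dynamic posix-clock interface:

/* Hypothetical userspace example; /dev/ptp0 stands in for the ENA PHC index */
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}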
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h
new file mode 100644
index 000000000000..7364fe714e44
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_PHC_H
+#define ENA_PHC_H
+
+#include <linux/ptp_clock_kernel.h>
+
+struct ena_phc_info {
+ /* PTP hardware capabilities */
+ struct ptp_clock_info clock_info;
+
+ /* Registered PTP clock device */
+ struct ptp_clock *clock;
+
+ /* Adapter specific private data structure */
+ struct ena_adapter *adapter;
+
+ /* PHC lock */
+ spinlock_t lock;
+
+ /* Enabled by kernel */
+ bool enabled;
+};
+
+void ena_phc_enable(struct ena_adapter *adapter, bool enable);
+bool ena_phc_is_enabled(struct ena_adapter *adapter);
+bool ena_phc_is_active(struct ena_adapter *adapter);
+int ena_phc_get_index(struct ena_adapter *adapter);
+int ena_phc_init(struct ena_adapter *adapter);
+void ena_phc_destroy(struct ena_adapter *adapter);
+int ena_phc_alloc(struct ena_adapter *adapter);
+void ena_phc_free(struct ena_adapter *adapter);
+
+#endif /* ENA_PHC_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index a2efebafd686..51068dc1cc2a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -53,6 +53,11 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+/* phc_registers offsets */
+
+/* 100 base */
+#define ENA_REGS_PHC_DB_OFF 0x100
+
/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
@@ -129,4 +134,7 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+/* phc_db_req_id register */
+#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff
+
#endif /* _ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 620785ffbd51..5b0ab6240cf2 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
xgbe-platform.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index e1296cbf4ff3..009fbc9b11ce 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -223,6 +223,10 @@
#define MAC_TSSR 0x0d20
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
+#define MAC_TICNR 0x0d58
+#define MAC_TICSNR 0x0d5C
+#define MAC_TECNR 0x0d60
+#define MAC_TECSNR 0x0d64
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
@@ -364,6 +368,10 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_GPSLCE_INDEX 6
+#define MAC_RCR_GPSLCE_WIDTH 1
+#define MAC_RCR_WD_INDEX 7
+#define MAC_RCR_WD_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
@@ -374,6 +382,8 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RCR_GPSL_INDEX 16
+#define MAC_RCR_GPSL_WIDTH 14
#define MAC_RFCR_PFCE_INDEX 8
#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
@@ -412,6 +422,8 @@
#define MAC_TCR_VNE_WIDTH 1
#define MAC_TCR_VNM_INDEX 25
#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TCR_JD_INDEX 16
+#define MAC_TCR_JD_WIDTH 1
#define MAC_TIR_TNID_INDEX 0
#define MAC_TIR_TNID_WIDTH 16
#define MAC_TSCR_AV8021ASMEN_INDEX 28
@@ -420,6 +432,8 @@
#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
#define MAC_TSCR_TSADDREG_INDEX 5
#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSUPDT_INDEX 3
+#define MAC_TSCR_TSUPDT_WIDTH 1
#define MAC_TSCR_TSCFUPDT_INDEX 1
#define MAC_TSCR_TSCFUPDT_WIDTH 1
#define MAC_TSCR_TSCTRLSSR_INDEX 9
@@ -448,6 +462,10 @@
#define MAC_TSSR_TXTSC_WIDTH 1
#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_TICSNR_TSICSNS_INDEX 8
+#define MAC_TICSNR_TSICSNS_WIDTH 8
+#define MAC_TECSNR_TSECSNS_INDEX 8
+#define MAC_TECSNR_TSECSNS_WIDTH 8
#define MAC_VLANHTR_VLHT_INDEX 0
#define MAC_VLANHTR_VLHT_WIDTH 16
#define MAC_VLANIR_VLTI_INDEX 20
@@ -1269,6 +1287,8 @@
#define MDIO_VEND2_CTRL1_SS13 BIT(13)
#endif
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
+
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 466b5f6e5578..e5391a2eca51 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1558,125 +1558,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- unsigned int count = 10000;
-
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev,
- "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- unsigned int count = 10000;
-
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr, tx_ssr;
- u64 nsec;
-
- if (pdata->vdata->tx_tstamp_workaround) {
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- } else {
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- }
-
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = tx_ssr;
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -2850,9 +2731,19 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
+ XGMAC_GIANT_PACKET_MTU);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
+ } else {
+ val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ }
}
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
@@ -3661,13 +3552,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
/* For Data Center Bridging config */
hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 65447f9a0a59..2e9b95a94f89 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -448,7 +448,7 @@ static void xgbe_isr_bh_work(struct work_struct *work)
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
+ xgbe_get_tx_tstamp(pdata);
queue_work(pdata->dev_workqueue,
&pdata->tx_tstamp_work);
}
@@ -1371,199 +1371,6 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (!pdata->tx_tstamp_skb)
- goto unlock;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- pdata->tx_tstamp_skb = NULL;
-
-unlock:
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_NTP_ALL:
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- skb_tx_timestamp(skb);
-}
-
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (skb_vlan_tag_present(skb))
@@ -1776,6 +1583,9 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ /* Initialize PTP timestamping and clock. */
+ xgbe_init_ptp(pdata);
+
ret = xgbe_alloc_memory(pdata);
if (ret)
goto err_ptpclk;
@@ -2546,12 +2356,8 @@ skip_data:
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
}
if (XGMAC_GET_BITS(packet->attributes,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
new file mode 100644
index 000000000000..bc52e5ec6420
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata,
+ unsigned int sec, unsigned int nsec)
+{
+ int count;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+
+ /* issue command to update the system time value */
+ XGMAC_IOWRITE(pdata, MAC_TSCR,
+ XGMAC_IOREAD(pdata, MAC_TSCR) |
+ (1 << MAC_TSCR_TSUPDT_INDEX));
+
+ /* Wait for the time adjust/update to complete */
+ count = 10000;
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating system timestamp\n");
+}
+
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
+
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec *= NSEC_PER_SEC;
+ nsec += le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr)
+{
+ unsigned int value = 0;
+
+ value = XGMAC_IOREAD(pdata, MAC_TSCR);
+ value |= mac_tscr;
+ XGMAC_IOWRITE(pdata, MAC_TSCR, value);
+}
+
+void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(pdata->tx_tstamp);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.1AS, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.1AS, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.1AS, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
+
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+int xgbe_init_ptp(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_tscr = 0;
+ struct timespec64 now;
+ u64 dividend;
+
+ /* Register settings below depend on the link speed. */
+ switch (pdata->phy.speed) {
+ case SPEED_1000:
+ XGMAC_IOWRITE(pdata, MAC_TICNR, MAC_TICNR_1G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_1G_INITVAL);
+ break;
+ case SPEED_2500:
+ case SPEED_10000:
+ XGMAC_IOWRITE_BITS(pdata, MAC_TICSNR, TSICSNS,
+ MAC_TICSNR_10G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_10G_INITVAL);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TECSNR, TSECSNS,
+ MAC_TECSNR_10G_INITVAL);
+ break;
+ case SPEED_UNKNOWN:
+ default:
+ break;
+ }
+
+ /* Enable IEEE1588 PTP clock. */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ /* Set one nanosecond accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return -EOPNOTSUPP;
+
+ if (pdata->vdata->tstamp_ptp_clock_freq) {
+ /* Initialize time registers based on
+ * 125MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC,
+ XGBE_V2_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC,
+ XGBE_V2_TSTAMP_SNSINC);
+ } else {
+ /* Initialize time registers based on
+ * 50MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ }
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / (PTP clock based on SSINC))
+ * = (2^32 * (PTP clock based on SSINC)) / PTP ref clock
+ */
+ if (pdata->vdata->tstamp_ptp_clock_freq)
+ dividend = XGBE_V2_PTP_ACT_CLK_FREQ;
+ else
+ dividend = XGBE_PTP_ACT_CLK_FREQ;
+
+ dividend = (u64)(dividend << 32);
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+
+ dma_wmb();
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ xgbe_set_tstamp_time(pdata, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 4ebdd123c435..d1f0419edb23 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -275,7 +275,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 0;
- netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+ netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 71449edbb76d..1a37ec45e650 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -266,6 +266,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+
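+ /* Enable automatic MAC switching (XGBE_VEND2_MAC_AUTO_SW) so the
+ * MAC follows the clause 37 auto-negotiation result.
+ */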
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_VEND2_MAC_AUTO_SW;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -894,6 +898,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1295,6 +1304,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
&an_restart);
+ /* bail out if the link status register read fails */
+ if (pdata->phy.link < 0)
+ return;
+
if (an_restart) {
xgbe_phy_config_aneg(pdata);
goto adjust_link;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 097ec5e4f261..e3e1dca9856a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -414,6 +414,7 @@ static struct xgbe_version_data xgbe_v2a = {
.tx_max_fifo_size = 229376,
.rx_max_fifo_size = 229376,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -430,6 +431,7 @@ static struct xgbe_version_data xgbe_v2b = {
.tx_max_fifo_size = 65536,
.rx_max_fifo_size = 65536,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 7a4dfa4e19c7..23c39e92e783 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -2746,8 +2746,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
- int ret;
+ int reg, ret;
*an_restart = 0;
@@ -2781,11 +2780,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+
+ /* The link status is latched low so that momentary link drops
+ * can be detected. If the link was already down, read again
+ * to get the latest state.
+ */
+ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+ }
if (pdata->en_rx_adap) {
/* if the link is available and adaptation is done,
@@ -2804,9 +2812,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
xgbe_phy_set_mode(pdata, phy_data->cur_mode);
}
- /* check again for the link and adaptation status */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ if (pdata->rx_adapt_done)
return 1;
} else if (reg & MDIO_STAT1_LSTATUS)
return 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 978c4dd01fa0..3658afc7801d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -13,18 +13,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static u64 xgbe_cc_read(const struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
@@ -37,7 +25,7 @@ static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ xgbe_update_tstamp_addend(pdata, addend);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
@@ -49,16 +37,39 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
+ unsigned int neg_adjust = 0;
+ unsigned int sec, nsec;
+ u32 quotient, remainder;
unsigned long flags;
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ /* Negative adjustment for the HW timer registers: seconds are
+ * written as their two's complement and nanoseconds as the
+ * complement of the rollover value (10^9 in digital rollover
+ * mode, 2^31 otherwise), with bit 31 flagging the subtraction.
+ */
+ if (neg_adjust) {
+ sec = -sec;
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSCTRLSSR))
+ nsec = (1000000000UL - nsec);
+ else
+ nsec = (0x80000000UL - nsec);
+ }
+ nsec = (neg_adjust << 31) | nsec;
+
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
+ xgbe_update_tstamp_time(pdata, sec, nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+static int xgbe_gettimex(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -67,9 +78,9 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
+ ptp_read_system_prets(sts);
+ nsec = xgbe_get_tstamp_time(pdata);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
*ts = ns_to_timespec64(nsec);
@@ -84,14 +95,9 @@ static int xgbe_settime(struct ptp_clock_info *info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ xgbe_set_tstamp_time(pdata, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -107,8 +113,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
struct ptp_clock_info *info = &pdata->ptp_clock_info;
struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
@@ -116,7 +120,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->max_adj = pdata->ptpclk_rate;
info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
+ info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
info->enable = xgbe_enable;
@@ -128,23 +132,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
pdata->ptp_clock = clock;
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
/* Disable all timestamping to start */
XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 6359bb87dc13..d7e03e292ec4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -80,11 +80,13 @@
#define XGBE_IRQ_MODE_EDGE 0
#define XGBE_IRQ_MODE_LEVEL 1
+#define XGBE_ETH_FRAME_HDR (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_GIANT_PACKET_MTU 16368
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
@@ -117,6 +119,14 @@
#define XGBE_MSI_BASE_COUNT 4
#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+/* Initial PTP register values based on Link Speed. */
+#define MAC_TICNR_1G_INITVAL 0x10
+#define MAC_TECNR_1G_INITVAL 0x28
+
+#define MAC_TICSNR_10G_INITVAL 0x33
+#define MAC_TECNR_10G_INITVAL 0x14
+#define MAC_TECSNR_10G_INITVAL 0xCC
+
/* PCI clock frequencies */
#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
@@ -126,6 +136,11 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
+#define XGBE_PTP_ACT_CLK_FREQ 500000000
+
+#define XGBE_V2_TSTAMP_SSINC 0xA
+#define XGBE_V2_TSTAMP_SNSINC 0
+#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
@@ -183,12 +198,12 @@
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
/* ECC correctable error notification window (seconds) */
#define XGBE_ECC_LIMIT 60
@@ -739,14 +754,6 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
/* For Data Center Bridging config */
void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
@@ -944,6 +951,7 @@ struct xgbe_version_data {
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
unsigned int tx_tstamp_workaround;
+ unsigned int tstamp_ptp_clock_freq;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned int irq_reissue_support;
@@ -1129,8 +1137,6 @@ struct xgbe_prv_data {
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
unsigned int tstamp_addend;
struct work_struct tx_tstamp_work;
struct sk_buff *tx_tstamp_skb;
@@ -1275,6 +1281,29 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
void xgbe_restart_dev(struct xgbe_prv_data *pdata);
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+/* For Timestamp config */
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr);
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata);
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata);
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc);
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend);
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+void xgbe_tx_tstamp(struct work_struct *work);
+int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq);
+int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq);
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet);
+int xgbe_init_ptp(struct xgbe_prv_data *pdata);
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 42c0efc1b455..4e66fd9b2ab1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -113,6 +113,8 @@ struct aq_stats_s {
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
+#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10)
+
#define AQ_HW_FLAG_STARTED 0x00000004U
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 08630ee94251..ed5231dece3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -463,7 +463,7 @@ static const struct dev_pm_ops aq_pm_ops = {
};
#endif
-static struct pci_driver aq_pci_ops = {
+static struct pci_driver aq_pci_driver = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
@@ -476,11 +476,11 @@ static struct pci_driver aq_pci_ops = {
int aq_pci_func_register_driver(void)
{
- return pci_register_driver(&aq_pci_ops);
+ return pci_register_driver(&aq_pci_driver);
}
void aq_pci_func_unregister_driver(void)
{
- pci_unregister_driver(&aq_pci_ops);
+ pci_unregister_driver(&aq_pci_driver);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 52e2070a4a2f..7370e3f76b62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
return aq_a2_fw_get_phy_temp(self, temp);
}
+static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol)
+{
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ struct wake_on_lan_s wake_on_lan;
+
+ memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN);
+ hw_atl2_shared_buffer_write(self, mac_address, mac_address);
+
+ memset(&wake_on_lan, 0, sizeof(wake_on_lan));
+
+ if (wol & WAKE_MAGIC)
+ wake_on_lan.wake_on_magic_packet = 1U;
+
+ if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM))
+ wake_on_lan.wake_on_link_up = 1U;
+
+ hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan);
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SLEEP_PROXY;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ u32 wol = self->aq_nic_cfg->wol;
+ int err = 0;
+
+ if (wol)
+ err = aq_a2_fw_set_wol_params(self, mac, wol);
+
+ return err;
+}
+
static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
struct link_options_s link_options;
@@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_state = aq_a2_fw_set_state,
.update_link_status = aq_a2_fw_update_link_status,
.update_stats = aq_a2_fw_update_stats,
+ .set_power = aq_a2_fw_set_power,
.get_mac_temp = aq_a2_fw_get_mac_temp,
.get_phy_temp = aq_a2_fw_get_phy_temp,
.set_eee_rate = aq_a2_fw_set_eee_rate,
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index d8e6f23e1432..cbc730c7cff2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1213,6 +1213,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
buf->rx.rx_buf = data;
buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) {
+ skb_free_frag(data);
+ buf->rx.rx_buf = NULL;
+ return false;
+ }
desc->data = (u32)buf->rx.dma_addr + offset;
return true;
}
@@ -1511,6 +1516,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
+ netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
+ goto err_drop;
+ }
i = ring->curr & ring_mask;
desc = ag71xx_ring_desc(ring, i);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index ef1a51347351..7efa3fc257b3 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2688,7 +2688,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
- dev_set_threaded(netdev, true);
+ netif_threaded_enable(netdev);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index cfdb546a09e7..98a4d089270e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
break;
}
- buffer_info->alloced = 1;
- buffer_info->skb = skb;
- buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ kfree_skb(skb);
+ adapter->soft_stats.rx_dropped++;
+ break;
+ }
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16)adapter->rx_buffer_len;
+
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
return 0;
}
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_buffer *buffer_info;
@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags;
unsigned int f;
int retval;
+ u16 first_mapped;
u16 next_to_use;
u16 data_len;
u8 hdr_len;
@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buf_len -= skb->data_len;
nr_frags = skb_shinfo(skb)->nr_frags;
next_to_use = atomic_read(&tpd_ring->next_to_use);
+ first_mapped = next_to_use;
buffer_info = &tpd_ring->buffer_info[next_to_use];
BUG_ON(buffer_info->skb);
/* put skb in last TPD */
@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
page, offset,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, buf_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, i * ATL1_MAX_TX_BUF_LEN,
buffer_info->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* last tpd's buffer-info */
buffer_info->skb = skb;
+
+ return true;
+
+ dma_err:
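+ /* Unwind any descriptors already mapped for this skb */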
+ while (first_mapped != next_to_use) {
+ buffer_info = &tpd_ring->buffer_info[first_mapped];
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+
+ if (++first_mapped == tpd_ring->count)
+ first_mapped = 0;
+ }
+ return false;
}
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
len = skb_headlen(skb);
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(skb->len <= 0))
+ goto drop_packet;
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
proto_hdr_len = skb_tcp_all_headers(skb);
- if (unlikely(proto_hdr_len > len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(proto_hdr_len > len))
+ goto drop_packet;
+
/* need additional TPD ? */
if (proto_hdr_len != len)
count += (len - proto_hdr_len +
@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
}
tso = atl1_tso(adapter, skb, ptpd);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (tso < 0)
+ goto drop_packet;
if (!tso) {
ret_val = atl1_tx_csum(adapter, skb, ptpd);
- if (ret_val < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (ret_val < 0)
+ goto drop_packet;
}
- atl1_tx_map(adapter, skb, ptpd);
+ if (!atl1_tx_map(adapter, skb, ptpd))
+ goto drop_packet;
+
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->soft_stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static int atl1_rings_clean(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 81a74e07464f..0fc10e6c6902 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -253,6 +253,15 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BNGE
+ tristate "Broadcom Ethernet device support"
+ depends on PCI
+ select NET_DEVLINK
+ help
+ This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
+ To compile this driver as a module, choose M here: the module will be
+ called bng_en.
+
config BCMASP
tristate "Broadcom ASP 2.0 Ethernet support"
depends on ARCH_BRCMSTB || COMPILE_TEST
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index bac5cb6ad0cd..10cc1c92ecfc 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
obj-$(CONFIG_BCMASP) += asp2/
+obj-$(CONFIG_BNGE) += bnge/
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 4381a4cfd8c6..63f1a8c3a7fb 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -430,4 +430,5 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
.get_ethtool_stats = bcmasp_get_ethtool_stats,
.get_sset_count = bcmasp_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = phy_ethtool_nway_reset,
};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 0d61b8580d72..b9973956c480 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -605,10 +605,8 @@ next:
bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
- if (processed < budget) {
- napi_complete_done(&intf->rx_napi, processed);
+ if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
- }
return processed;
}
@@ -818,6 +816,9 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
+
+ if (intf->parent->tx_chan_offset)
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
@@ -1281,6 +1282,8 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
ndev->hw_features |= ndev->features;
ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+ netdev_sw_irq_coalesce_default_on(ndev);
+
return intf;
err_free_netdev:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 8267417b3750..0353359c3fe9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2570,7 +2570,7 @@ static int __init b44_init(void)
unsigned int dma_desc_align_size = dma_get_cache_alignment();
int err;
- /* Setup paramaters for syncing RX/TX DMA descriptors */
+ /* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
err = b44_pci_init();
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
new file mode 100644
index 000000000000..6142d9c57f49
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BNGE) += bng_en.o
+
+bng_en-y := bnge_core.o \
+ bnge_devlink.o \
+ bnge_hwrm.o \
+ bnge_hwrm_lib.o \
+ bnge_rmem.o \
+ bnge_resc.o \
+ bnge_netdev.o \
+ bnge_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
new file mode 100644
index 000000000000..6fb3683b6b04
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_H_
+#define _BNGE_H_
+
+#define DRV_NAME "bng_en"
+#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+
+#include <linux/etherdevice.h>
+#include <linux/bnxt/hsi.h>
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+#define DRV_VER_MAJ 1
+#define DRV_VER_MIN 15
+#define DRV_VER_UPD 1
+
+extern char bnge_driver_name[];
+
+enum board_idx {
+ BCM57708,
+};
+
+struct bnge_pf_info {
+ u16 fw_fid;
+ u16 port_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+enum {
+ BNGE_FW_CAP_SHORT_CMD = BIT_ULL(0),
+ BNGE_FW_CAP_LLDP_AGENT = BIT_ULL(1),
+ BNGE_FW_CAP_DCBX_AGENT = BIT_ULL(2),
+ BNGE_FW_CAP_IF_CHANGE = BIT_ULL(3),
+ BNGE_FW_CAP_KONG_MB_CHNL = BIT_ULL(4),
+ BNGE_FW_CAP_ERROR_RECOVERY = BIT_ULL(5),
+ BNGE_FW_CAP_PKG_VER = BIT_ULL(6),
+ BNGE_FW_CAP_CFA_ADV_FLOW = BIT_ULL(7),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 = BIT_ULL(8),
+ BNGE_FW_CAP_PCIE_STATS_SUPPORTED = BIT_ULL(9),
+ BNGE_FW_CAP_EXT_STATS_SUPPORTED = BIT_ULL(10),
+ BNGE_FW_CAP_ERR_RECOVER_RELOAD = BIT_ULL(11),
+ BNGE_FW_CAP_HOT_RESET = BIT_ULL(12),
+ BNGE_FW_CAP_RX_ALL_PKT_TS = BIT_ULL(13),
+ BNGE_FW_CAP_VLAN_RX_STRIP = BIT_ULL(14),
+ BNGE_FW_CAP_VLAN_TX_INSERT = BIT_ULL(15),
+ BNGE_FW_CAP_EXT_HW_STATS_SUPPORTED = BIT_ULL(16),
+ BNGE_FW_CAP_LIVEPATCH = BIT_ULL(17),
+ BNGE_FW_CAP_HOT_RESET_IF = BIT_ULL(18),
+ BNGE_FW_CAP_RING_MONITOR = BIT_ULL(19),
+ BNGE_FW_CAP_DBG_QCAPS = BIT_ULL(20),
+ BNGE_FW_CAP_THRESHOLD_TEMP_SUPPORTED = BIT_ULL(21),
+ BNGE_FW_CAP_DFLT_VLAN_TPID_PCP = BIT_ULL(22),
+ BNGE_FW_CAP_VNIC_TUNNEL_TPA = BIT_ULL(23),
+ BNGE_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO = BIT_ULL(24),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 = BIT_ULL(25),
+ BNGE_FW_CAP_VNIC_RE_FLUSH = BIT_ULL(26),
+};
+
+enum {
+ BNGE_EN_ROCE_V1 = BIT_ULL(0),
+ BNGE_EN_ROCE_V2 = BIT_ULL(1),
+ BNGE_EN_STRIP_VLAN = BIT_ULL(2),
+ BNGE_EN_SHARED_CHNL = BIT_ULL(3),
+ BNGE_EN_UDP_GSO_SUPP = BIT_ULL(4),
+};
+
+#define BNGE_EN_ROCE (BNGE_EN_ROCE_V1 | BNGE_EN_ROCE_V2)
+
+enum {
+ BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA = BIT(0),
+ BNGE_RSS_CAP_UDP_RSS_CAP = BIT(1),
+ BNGE_RSS_CAP_NEW_RSS_CAP = BIT(2),
+ BNGE_RSS_CAP_RSS_TCAM = BIT(3),
+ BNGE_RSS_CAP_AH_V4_RSS_CAP = BIT(4),
+ BNGE_RSS_CAP_AH_V6_RSS_CAP = BIT(5),
+ BNGE_RSS_CAP_ESP_V4_RSS_CAP = BIT(6),
+ BNGE_RSS_CAP_ESP_V6_RSS_CAP = BIT(7),
+};
+
+#define BNGE_MAX_QUEUE 8
+struct bnge_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnge_dev {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ u64 dsn;
+#define BNGE_VPD_FLD_LEN 32
+ char board_partno[BNGE_VPD_FLD_LEN];
+ char board_serialno[BNGE_VPD_FLD_LEN];
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+
+ u16 chip_num;
+ u8 chip_rev;
+
+ int db_offset; /* db_offset within db_size */
+ int db_size;
+
+ /* HWRM members */
+ u16 hwrm_cmd_seq;
+ u16 hwrm_cmd_kong_seq;
+ struct dma_pool *hwrm_dma_pool;
+ struct hlist_head hwrm_pending_list;
+ u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+ char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNGE_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+
+ struct bnge_pf_info pf;
+
+ unsigned long state;
+#define BNGE_STATE_DRV_REGISTERED 0
+
+ u64 fw_cap;
+
+ /* Backing stores */
+ struct bnge_ctx_mem_info *ctx;
+
+ u64 flags;
+
+ struct bnge_hw_resc hw_resc;
+
+ u16 tso_max_segs;
+
+ int max_fltr;
+#define BNGE_L2_FLTR_MAX_FLTR 1024
+
+ u32 *rss_indir_tbl;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+#define BNGE_MAX_RSS_TABLE_ENTRIES \
+ (BNGE_RSS_TABLE_ENTRIES * BNGE_RSS_TABLE_MAX_TBL)
+ u16 rss_indir_tbl_entries;
+
+ u32 rss_cap;
+
+ u16 rx_nr_rings;
+ u16 tx_nr_rings;
+ u16 tx_nr_rings_per_tc;
+ /* Number of NQs */
+ u16 nq_nr_rings;
+
+ /* Aux device resources */
+ u16 aux_num_msix;
+ u16 aux_num_stat_ctxs;
+
+ u16 max_mtu;
+#define BNGE_MAX_MTU 9500
+
+ u16 hw_ring_stats_size;
+#define BNGE_NUM_RX_RING_STATS 8
+#define BNGE_NUM_TX_RING_STATS 8
+#define BNGE_NUM_TPA_RING_STATS 6
+#define BNGE_RING_STATS_SIZE \
+ ((BNGE_NUM_RX_RING_STATS + BNGE_NUM_TX_RING_STATS + \
+ BNGE_NUM_TPA_RING_STATS) * 8)
+
+ u16 max_tpa_v2;
+#define BNGE_SUPPORTS_TPA(bd) ((bd)->max_tpa_v2)
+
+ u8 num_tc;
+ u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
+ struct bnge_queue_info q_info[BNGE_MAX_QUEUE];
+ u8 tc_to_qidx[BNGE_MAX_QUEUE];
+ u8 q_ids[BNGE_MAX_QUEUE];
+ u8 max_q;
+ u8 port_count;
+
+ struct bnge_irq *irq_tbl;
+ u16 irqs_acquired;
+};
+
+static inline bool bnge_is_roce_en(struct bnge_dev *bd)
+{
+ return bd->flags & BNGE_EN_ROCE;
+}
+
+static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
+{
+ if (bd->netdev) {
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA ||
+ bn->priv_flags & BNGE_NET_EN_JUMBO)
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd);
+
+#endif /* _BNGE_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
new file mode 100644
index 000000000000..68da656f2894
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+
+char bnge_driver_name[] = DRV_NAME;
+
+static const struct {
+ char *name;
+} board_info[] = {
+ [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+};
+
+static const struct pci_device_id bnge_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1780), .driver_data = BCM57708 },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, bnge_pci_tbl);
+
+static void bnge_print_device_info(struct pci_dev *pdev, enum board_idx idx)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "%s found at mem %lx\n", board_info[idx].name,
+ (long)pci_resource_start(pdev, 0));
+
+ pcie_print_link_status(pdev);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd)
+{
+ return false;
+}
+
+static void bnge_nvm_cfg_ver_get(struct bnge_dev *bd)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnge_hwrm_nvm_dev_info(bd, &nvm_info))
+ snprintf(bd->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
+static int bnge_func_qcaps(struct bnge_dev *bd)
+{
+ int rc;
+
+ rc = bnge_hwrm_func_qcaps(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_queue_qportcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query qportcfg failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_resc_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "query resc caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_qcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query config failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_vnic_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "vnic caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnge_fw_unregister_dev(struct bnge_dev *bd)
+{
+ /* Context memory can be freed only after unregistering with firmware */
+ bnge_hwrm_func_drv_unrgtr(bd);
+ bnge_free_ctx_mem(bd);
+}
+
+static int bnge_fw_register_dev(struct bnge_dev *bd)
+{
+ int rc;
+
+ bd->fw_cap = 0;
+ rc = bnge_hwrm_ver_get(bd);
+ if (rc) {
+ dev_err(bd->dev, "Get Version command failed rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_nvm_cfg_ver_get(bd);
+
+ rc = bnge_hwrm_func_reset(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to reset function rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_hwrm_fw_set_time(bd);
+
+ rc = bnge_hwrm_func_drv_rgtr(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_ctx_mem(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc);
+ goto err_func_unrgtr;
+ }
+
+ /* Get the resources and configuration from firmware */
+ rc = bnge_func_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc);
+ rc = -ENODEV;
+ goto err_func_unrgtr;
+ }
+
+ return 0;
+
+err_func_unrgtr:
+ bnge_fw_unregister_dev(bd);
+ return rc;
+}
+
+static void bnge_pci_disable(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+}
+
+static int bnge_pci_enable(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return rc;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_pci_disable;
+ }
+
+ rc = pci_request_regions(pdev, bnge_driver_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_pci_disable;
+ }
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+ goto err_pci_release;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_pci_release:
+ pci_release_regions(pdev);
+
+err_pci_disable:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void bnge_unmap_bars(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ if (bd->bar1) {
+ pci_iounmap(pdev, bd->bar1);
+ bd->bar1 = NULL;
+ }
+
+ if (bd->bar0) {
+ pci_iounmap(pdev, bd->bar0);
+ bd->bar0 = NULL;
+ }
+}
+
+static void bnge_set_max_func_irqs(struct bnge_dev *bd,
+ unsigned int max_irqs)
+{
+ bd->hw_resc.max_irqs = max_irqs;
+}
+
+static int bnge_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
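+ /* The MSI-X table size field encodes the number of entries minus one */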
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+static int bnge_map_db_bar(struct bnge_dev *bd)
+{
+ if (!bd->db_size)
+ return -ENODEV;
+
+ bd->bar1 = pci_iomap(bd->pdev, 2, bd->db_size);
+ if (!bd->bar1)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int max_irqs;
+ struct bnge_dev *bd;
+ int rc;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability missing, aborting\n");
+ return -ENODEV;
+ }
+
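+ /* A kdump kernel inherits the device in an unknown state from the
+ * crashed kernel; stop bus mastering and reset the function before
+ * reusing it.
+ */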
+ if (is_kdump_kernel()) {
+ pci_clear_master(pdev);
+ pcie_flr(pdev);
+ }
+
+ rc = bnge_pci_enable(pdev);
+ if (rc)
+ return rc;
+
+ bnge_print_device_info(pdev, ent->driver_data);
+
+ bd = bnge_devlink_alloc(pdev);
+ if (!bd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ rc = -ENOMEM;
+ goto err_pci_disable;
+ }
+
+ bd->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bd->bar0) {
+ dev_err(&pdev->dev, "Failed mapping BAR-0, aborting\n");
+ rc = -ENOMEM;
+ goto err_devl_free;
+ }
+
+ rc = bnge_init_hwrm_resources(bd);
+ if (rc)
+ goto err_bar_unmap;
+
+ rc = bnge_fw_register_dev(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register with firmware rc = %d\n", rc);
+ goto err_hwrm_cleanup;
+ }
+
+ bnge_devlink_register(bd);
+
+ max_irqs = bnge_get_max_irq(pdev);
+ bnge_set_max_func_irqs(bd, max_irqs);
+
+ bnge_aux_init_dflt_config(bd);
+
+ rc = bnge_net_init_dflt_config(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up default cfg to netdev rc = %d\n",
+ rc);
+ goto err_fw_reg;
+ }
+
+ rc = bnge_map_db_bar(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed mapping doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto err_config_uninit;
+ }
+
+ rc = bnge_alloc_irqs(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
+ goto err_config_uninit;
+ }
+
+ rc = bnge_netdev_alloc(bd, max_irqs);
+ if (rc)
+ goto err_free_irq;
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_free_irq:
+ bnge_free_irqs(bd);
+
+err_config_uninit:
+ bnge_net_uninit_dflt_config(bd);
+
+err_fw_reg:
+ bnge_devlink_unregister(bd);
+ bnge_fw_unregister_dev(bd);
+
+err_hwrm_cleanup:
+ bnge_cleanup_hwrm_resources(bd);
+
+err_bar_unmap:
+ bnge_unmap_bars(pdev);
+
+err_devl_free:
+ bnge_devlink_free(bd);
+
+err_pci_disable:
+ bnge_pci_disable(pdev);
+ return rc;
+}
+
+static void bnge_remove_one(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ bnge_netdev_free(bd);
+
+ bnge_free_irqs(bd);
+
+ bnge_net_uninit_dflt_config(bd);
+
+ bnge_devlink_unregister(bd);
+
+ bnge_fw_unregister_dev(bd);
+
+ bnge_cleanup_hwrm_resources(bd);
+
+ bnge_unmap_bars(pdev);
+
+ bnge_devlink_free(bd);
+
+ bnge_pci_disable(pdev);
+}
+
+static void bnge_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+
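+ /* On power-off, disable wake-up and leave the device in D3hot */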
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, 0);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver bnge_driver = {
+ .name = bnge_driver_name,
+ .id_table = bnge_pci_tbl,
+ .probe = bnge_probe_one,
+ .remove = bnge_remove_one,
+ .shutdown = bnge_shutdown,
+};
+
+static int __init bnge_init_module(void)
+{
+ return pci_register_driver(&bnge_driver);
+}
+module_init(bnge_init_module);
+
+static void __exit bnge_exit_module(void)
+{
+ pci_unregister_driver(&bnge_driver);
+}
+module_exit(bnge_exit_module);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
new file mode 100644
index 000000000000..a987afebd64d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm_lib.h"
+
+static int bnge_dl_info_put(struct bnge_dev *bd, struct devlink_info_req *req,
+ enum bnge_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE))
+ return 0;
+
+ switch (type) {
+ case BNGE_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNGE_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNGE_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+
+ return 0;
+}
+
+static void bnge_vpd_read_info(struct bnge_dev *bd)
+{
+ struct pci_dev *pdev = bd->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto read_sn;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_partno, &vpd_data[pos], size);
+
+read_sn:
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
+ if (pos < 0)
+ goto exit;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_serialno, &vpd_data[pos], size);
+
+exit:
+ kfree(vpd_data);
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
+static int bnge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
+ struct bnge_dev *bd = devlink_priv(devlink);
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
+ char buf[32];
+ int rc;
+
+ if (bd->dsn) {
+ u8 dsn[8];
+
+ put_unaligned_le64(bd->dsn, dsn);
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ dsn[7], dsn[6], dsn[5], dsn[4],
+ dsn[3], dsn[2], dsn[1], dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set dsn");
+ return rc;
+ }
+ }
+
+ if (strlen(bd->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req,
+ bd->board_serialno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set board serial number");
+ return rc;
+ }
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bd->board_partno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set board part number");
+ return rc;
+ }
+
+ /* More information from HWRM ver get command */
+ sprintf(buf, "%X", bd->chip_num);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic id");
+ return rc;
+ }
+
+ ver_resp = &bd->ver_resp;
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic info");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bd->nvm_cfg_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set firmware version");
+ return rc;
+ }
+
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware generic version");
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bd->hwrm_ver_supp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt api version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set ncsi firmware version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set roce firmware version");
+ return rc;
+ }
+
+ rc = bnge_hwrm_nvm_dev_info(bd, &nvm_dev_info);
+ if (!(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set roce firmware version");
+ return rc;
+ }
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware version");
+ return rc;
+ }
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored ncsi firmware version");
+ return rc;
+ }
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored roce firmware version");
+
+ return rc;
+}
+
+static const struct devlink_ops bnge_devlink_ops = {
+ .info_get = bnge_devlink_info_get,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_free(devlink);
+}
+
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev)
+{
+ struct devlink *devlink;
+ struct bnge_dev *bd;
+
+ devlink = devlink_alloc(&bnge_devlink_ops, sizeof(*bd), &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ bd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, bd);
+ bd->dev = &pdev->dev;
+ bd->pdev = pdev;
+
+ bd->dsn = pci_get_dsn(pdev);
+ if (!bd->dsn)
+ pci_warn(pdev, "Failed to get DSN\n");
+
+ bnge_vpd_read_info(bd);
+
+ return bd;
+}
+
+void bnge_devlink_register(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_register(devlink);
+}
+
+void bnge_devlink_unregister(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
new file mode 100644
index 000000000000..c6575255e650
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DEVLINK_H_
+#define _BNGE_DEVLINK_H_
+
+enum bnge_dl_version_type {
+ BNGE_VERSION_FIXED,
+ BNGE_VERSION_RUNNING,
+ BNGE_VERSION_STORED,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd);
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev);
+void bnge_devlink_register(struct bnge_dev *bd);
+void bnge_devlink_unregister(struct bnge_dev *bd);
+
+#endif /* _BNGE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
new file mode 100644
index 000000000000..569371c1b4f2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "bnge.h"
+#include "bnge_ethtool.h"
+
+static void bnge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_dev *bd = bn->bd;
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bd->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bd->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bnge_ethtool_ops = {
+ .get_drvinfo = bnge_get_drvinfo,
+};
+
+void bnge_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &bnge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
new file mode 100644
index 000000000000..21e96a0976d5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_ETHTOOL_H_
+#define _BNGE_ETHTOOL_H_
+
+void bnge_set_ethtool_ops(struct net_device *dev);
+
+#endif /* _BNGE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
new file mode 100644
index 000000000000..0f971af24142
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+
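+/* Each request carries a sentinel derived from its context address and
+ * request type; it is verified before the context is used so that stale
+ * or invalid request pointers are caught early.
+ */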
+static u64 bnge_cal_sentinel(struct bnge_hwrm_ctx *ctx, u16 req_type)
+{
+ return (((uintptr_t)ctx) + req_type) ^ BNGE_HWRM_SENTINEL;
+}
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len)
+{
+ struct bnge_hwrm_ctx *ctx;
+ dma_addr_t dma_handle;
+ u8 *req_addr;
+
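+ /* Each DMA buffer holds the request at offset 0, the response at
+ * BNGE_HWRM_RESP_OFFSET and the driver context at BNGE_HWRM_CTX_OFFSET,
+ * so the request must fit below the context area.
+ */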
+ if (req_len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ req_addr = dma_pool_alloc(bd->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
+ &dma_handle);
+ if (!req_addr)
+ return -ENOMEM;
+
+ ctx = (struct bnge_hwrm_ctx *)(req_addr + BNGE_HWRM_CTX_OFFSET);
+ /* safety first, sentinel used to check for invalid requests */
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+ ctx->req_len = req_len;
+ ctx->req = (struct input *)req_addr;
+ ctx->resp = (struct output *)(req_addr + BNGE_HWRM_RESP_OFFSET);
+ ctx->dma_handle = dma_handle;
+ ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
+ ctx->timeout = bd->hwrm_cmd_timeout ?: BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ ctx->gfp = GFP_KERNEL;
+ ctx->slice_addr = NULL;
+
+ /* initialize common request fields */
+ ctx->req->req_type = cpu_to_le16(req_type);
+ ctx->req->resp_addr = cpu_to_le64(dma_handle + BNGE_HWRM_RESP_OFFSET);
+ ctx->req->cmpl_ring = cpu_to_le16(BNGE_HWRM_NO_CMPL_RING);
+ ctx->req->target_id = cpu_to_le16(BNGE_HWRM_TARGET);
+ *req = ctx->req;
+
+ return 0;
+}
+
+static struct bnge_hwrm_ctx *__hwrm_ctx_get(struct bnge_dev *bd, u8 *req_addr)
+{
+ void *ctx_addr = req_addr + BNGE_HWRM_CTX_OFFSET;
+ struct input *req = (struct input *)req_addr;
+ struct bnge_hwrm_ctx *ctx = ctx_addr;
+ u64 sentinel;
+
+ if (!req) {
+ dev_err(bd->dev, "null HWRM request");
+ dump_stack();
+ return NULL;
+ }
+
+ /* HWRM API has no type safety, verify sentinel to validate address */
+ sentinel = bnge_cal_sentinel(ctx, le16_to_cpu(req->req_type));
+ if (ctx->sentinel != sentinel) {
+ dev_err(bd->dev, "HWRM sentinel mismatch, req_type = %u\n",
+ (u32)le16_to_cpu(req->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ return ctx;
+}
+
+void bnge_hwrm_req_timeout(struct bnge_dev *bd,
+ void *req, unsigned int timeout)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->timeout = timeout;
+}
+
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t gfp)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->gfp = gfp;
+}
+
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->flags |= (flags & BNGE_HWRM_API_FLAGS);
+}
+
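+/* Take ownership of the request and return a pointer to its response
+ * buffer. The request and response remain valid until released with
+ * bnge_hwrm_req_drop().
+ */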
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *input = (struct input *)req;
+
+ if (!ctx)
+ return NULL;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) {
+ dev_err(bd->dev, "HWRM context already owned, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ ctx->flags |= BNGE_HWRM_INTERNAL_CTX_OWNED;
+ return ((u8 *)req) + BNGE_HWRM_RESP_OFFSET;
+}
+
+static void __hwrm_ctx_invalidate(struct bnge_dev *bd,
+ struct bnge_hwrm_ctx *ctx)
+{
+ void *addr = ((u8 *)ctx) - BNGE_HWRM_CTX_OFFSET;
+ dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
+
+ /* unmap any auxiliary DMA slice */
+ if (ctx->slice_addr)
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+
+ /* invalidate, ensure ownership, sentinel and dma_handle are cleared */
+ memset(ctx, 0, sizeof(struct bnge_hwrm_ctx));
+
+ /* return the buffer to the DMA pool */
+ if (dma_handle)
+ dma_pool_free(bd->hwrm_dma_pool, addr, dma_handle);
+}
+
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ __hwrm_ctx_invalidate(bd, ctx);
+}
+
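+/* Map HWRM firmware error codes to negative errno values. */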
+static int bnge_map_hwrm_error(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
+static struct bnge_hwrm_wait_token *
+bnge_hwrm_create_token(struct bnge_dev *bd, enum bnge_hwrm_chnl dst)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return NULL;
+
+ mutex_lock(&bd->hwrm_cmd_lock);
+
+ token->dst = dst;
+ token->state = BNGE_HWRM_PENDING;
+ if (dst == BNGE_HWRM_CHNL_CHIMP) {
+ token->seq_id = bd->hwrm_cmd_seq++;
+ hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list);
+ } else {
+ token->seq_id = bd->hwrm_cmd_kong_seq++;
+ }
+
+ return token;
+}
+
+static void
+bnge_hwrm_destroy_token(struct bnge_dev *bd, struct bnge_hwrm_wait_token *token)
+{
+ if (token->dst == BNGE_HWRM_CHNL_CHIMP) {
+ hlist_del_rcu(&token->node);
+ kfree_rcu(token, rcu);
+ } else {
+ kfree(token);
+ }
+ mutex_unlock(&bd->hwrm_cmd_lock);
+}
+
+static void bnge_hwrm_req_dbg(struct bnge_dev *bd, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNGE_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNGE_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ dev_dbg(bd->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define bnge_hwrm_err(bd, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNGE_HWRM_CTX_SILENT) \
+ dev_dbg((bd)->dev, fmt, __VA_ARGS__); \
+ else \
+ dev_err((bd)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
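+/* Send one HWRM request: copy the message into the ChiMP communication
+ * area in BAR0, zero-pad up to the maximum request length, ring the
+ * doorbell, then wait for the result.  Completion is detected either via
+ * the wait token (updated when a completion-ring event is processed) or,
+ * for requests without a completion ring, by polling resp_len and the
+ * final "valid" byte of the DMA response buffer.
+ */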
+static int __hwrm_send_ctx(struct bnge_dev *bd, struct bnge_hwrm_ctx *ctx)
+{
+ u32 doorbell_offset = BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ enum bnge_hwrm_chnl dst = BNGE_HWRM_CHNL_CHIMP;
+ u32 bar_offset = BNGE_GRCPF_REG_CHIMP_COMM;
+ struct bnge_hwrm_wait_token *token = NULL;
+ u16 max_req_len = BNGE_HWRM_MAX_REQ_LEN;
+ unsigned int i, timeout, tmo_count;
+ u32 *data = (u32 *)ctx->req;
+ u32 msg_len = ctx->req_len;
+ int rc = -EBUSY;
+ u32 req_type;
+ u16 len = 0;
+ u8 *valid;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_RESP_DIRTY)
+ memset(ctx->resp, 0, PAGE_SIZE);
+
+ req_type = le16_to_cpu(ctx->req->req_type);
+
+ if (msg_len > BNGE_HWRM_MAX_REQ_LEN &&
+ msg_len > bd->hwrm_max_ext_req_len) {
+		dev_warn(bd->dev, "oversized hwrm request, req_type 0x%x\n",
+ req_type);
+ rc = -E2BIG;
+ goto exit;
+ }
+
+ token = bnge_hwrm_create_token(bd, dst);
+ if (!token) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ctx->req->seq_id = cpu_to_le16(token->seq_id);
+
+ /* Ensure any associated DMA buffers are written before doorbell */
+ wmb();
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bd->bar0 + bar_offset, data, msg_len / 4);
+
+ for (i = msg_len; i < max_req_len; i += 4)
+ writel(0, bd->bar0 + bar_offset + i);
+
+ /* Ring channel doorbell */
+ writel(1, bd->bar0 + doorbell_offset);
+
+ bnge_hwrm_req_dbg(bd, ctx->req);
+
+ /* Limit timeout to an upper limit */
+ timeout = min(ctx->timeout,
+ bd->hwrm_cmd_max_timeout ?: BNGE_HWRM_CMD_MAX_TIMEOUT);
+ /* convert timeout to usec */
+ timeout *= 1000;
+
+ i = 0;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - BNGE_HWRM_SHORT_MIN_TIMEOUT *
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, BNGE_HWRM_MIN_TIMEOUT);
+
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (READ_ONCE(token->state) < BNGE_HWRM_COMPLETE &&
+ i++ < tmo_count) {
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (READ_ONCE(token->state) != BNGE_HWRM_COMPLETE) {
+ bnge_hwrm_err(bd, ctx, "No hwrm cmpl received: 0x%x\n",
+ req_type);
+ goto exit;
+ }
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ valid = ((u8 *)ctx->resp) + len - 1;
+ } else {
+ __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
+ int j;
+
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ if (token &&
+ READ_ONCE(token->state) == BNGE_HWRM_DEFERRED) {
+ bnge_hwrm_destroy_token(bd, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ dev_warn(bd->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq), req_type, le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+ bnge_hwrm_err(bd, ctx,
+ "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ bnge_hwrm_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < BNGE_HWRM_FIN_WAIT_USEC; ) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
+ }
+
+ if (j >= BNGE_HWRM_FIN_WAIT_USEC) {
+ bnge_hwrm_err(bd, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ bnge_hwrm_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
+ goto exit;
+ }
+ }
+
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNGE_HWRM_CTX_SILENT))
+ dev_warn(bd->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ bnge_hwrm_err(bd, ctx, "hwrm req_type 0x%x seq id 0x%x error %d\n",
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ rc = bnge_map_hwrm_error(rc);
+
+exit:
+ if (token)
+ bnge_hwrm_destroy_token(bd, token);
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNGE_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_invalidate(bd, ctx);
+ return rc;
+}
+
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send_ctx(bd, ctx);
+}
+
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req)
+{
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_CTX_SILENT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
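+/* Return a DMA-capable scratch area for an indirect request payload.
+ * The slice is carved out of the unused tail of the request's own DMA
+ * buffer when it fits; otherwise a single coherent allocation is made
+ * and freed automatically when the request is dropped.
+ */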
+void *
+bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma_handle)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ u8 *end = ((u8 *)req) + BNGE_HWRM_DMA_SIZE;
+ struct input *input = req;
+ u8 *addr, *req_addr = req;
+ u32 max_offset, offset;
+
+ if (!ctx)
+ return NULL;
+
+ max_offset = BNGE_HWRM_DMA_SIZE - ctx->allocated;
+ offset = max_offset - size;
+ offset = ALIGN_DOWN(offset, BNGE_HWRM_DMA_ALIGN);
+ addr = req_addr + offset;
+
+ if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
+ ctx->allocated = end - addr;
+ *dma_handle = ctx->dma_handle + offset;
+ return addr;
+ }
+
+ if (ctx->slice_addr) {
+ dev_err(bd->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ addr = dma_alloc_coherent(bd->dev, size, dma_handle, ctx->gfp);
+ if (!addr)
+ return NULL;
+
+ ctx->slice_addr = addr;
+ ctx->slice_size = size;
+ ctx->slice_handle = *dma_handle;
+
+ return addr;
+}
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ dma_pool_destroy(bd->hwrm_dma_pool);
+ bd->hwrm_dma_pool = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node)
+ WRITE_ONCE(token->state, BNGE_HWRM_CANCELLED);
+ rcu_read_unlock();
+}
+
+int bnge_init_hwrm_resources(struct bnge_dev *bd)
+{
+ bd->hwrm_dma_pool = dma_pool_create("bnge_hwrm", bd->dev,
+ BNGE_HWRM_DMA_SIZE,
+ BNGE_HWRM_DMA_ALIGN, 0);
+ if (!bd->hwrm_dma_pool)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&bd->hwrm_pending_list);
+ mutex_init(&bd->hwrm_cmd_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
new file mode 100644
index 000000000000..83794a12cc81
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_H_
+#define _BNGE_HWRM_H_
+
+#include <linux/bnxt/hsi.h>
+
+enum bnge_hwrm_ctx_flags {
+ BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
+ BNGE_HWRM_INTERNAL_RESP_DIRTY = BIT(1),
+ BNGE_HWRM_CTX_SILENT = BIT(2),
+ BNGE_HWRM_FULL_WAIT = BIT(3),
+};
+
+#define BNGE_HWRM_API_FLAGS (BNGE_HWRM_CTX_SILENT | BNGE_HWRM_FULL_WAIT)
+
+struct bnge_hwrm_ctx {
+ u64 sentinel;
+ dma_addr_t dma_handle;
+ struct output *resp;
+ struct input *req;
+ dma_addr_t slice_handle;
+ void *slice_addr;
+ u32 slice_size;
+ u32 req_len;
+ enum bnge_hwrm_ctx_flags flags;
+ unsigned int timeout;
+ u32 allocated;
+ gfp_t gfp;
+};
+
+enum bnge_hwrm_wait_state {
+ BNGE_HWRM_PENDING,
+ BNGE_HWRM_DEFERRED,
+ BNGE_HWRM_COMPLETE,
+ BNGE_HWRM_CANCELLED,
+};
+
+enum bnge_hwrm_chnl { BNGE_HWRM_CHNL_CHIMP, BNGE_HWRM_CHNL_KONG };
+
+struct bnge_hwrm_wait_token {
+ struct rcu_head rcu;
+ struct hlist_node node;
+ enum bnge_hwrm_wait_state state;
+ enum bnge_hwrm_chnl dst;
+ u16 seq_id;
+};
+
+#define BNGE_DFLT_HWRM_CMD_TIMEOUT 500
+
+#define BNGE_GRCPF_REG_CHIMP_COMM 0x0
+#define BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+
+#define BNGE_HWRM_MAX_REQ_LEN (bd->hwrm_max_req_len)
+#define BNGE_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+#define BNGE_HWRM_CMD_MAX_TIMEOUT 40000U
+#define BNGE_SHORT_HWRM_CMD_TIMEOUT 20
+#define BNGE_HWRM_CMD_TIMEOUT (bd->hwrm_cmd_timeout)
+#define BNGE_HWRM_RESET_TIMEOUT ((BNGE_HWRM_CMD_TIMEOUT) * 4)
+#define BNGE_HWRM_TARGET 0xffff
+#define BNGE_HWRM_NO_CMPL_RING -1
+#define BNGE_HWRM_REQ_MAX_SIZE 128
+#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
+#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE
+#define BNGE_HWRM_RESP_OFFSET (BNGE_HWRM_DMA_SIZE - \
+ BNGE_HWRM_RESP_RESERVED)
+#define BNGE_HWRM_CTX_OFFSET (BNGE_HWRM_RESP_OFFSET - \
+ sizeof(struct bnge_hwrm_ctx))
+#define BNGE_HWRM_DMA_ALIGN 16
+#define BNGE_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
+#define BNGE_HWRM_SHORT_MIN_TIMEOUT 3
+#define BNGE_HWRM_SHORT_MAX_TIMEOUT 10
+#define BNGE_HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define BNGE_HWRM_MIN_TIMEOUT 25
+#define BNGE_HWRM_MAX_TIMEOUT 40
+
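+/* Convert a poll-loop iteration count into the approximate time slept so
+ * far in microseconds, accounting for the shorter initial poll interval.
+ */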
+static inline unsigned int bnge_hwrm_timeout(unsigned int n)
+{
+ return n <= BNGE_HWRM_SHORT_TIMEOUT_COUNTER ?
+ n * BNGE_HWRM_SHORT_MIN_TIMEOUT :
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER *
+ BNGE_HWRM_SHORT_MIN_TIMEOUT +
+ (n - BNGE_HWRM_SHORT_TIMEOUT_COUNTER) *
+ BNGE_HWRM_MIN_TIMEOUT;
+}
+
+#define BNGE_HWRM_FIN_WAIT_USEC 50000
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd);
+int bnge_init_hwrm_resources(struct bnge_dev *bd);
+
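+/* Typical request/response sequence (see bnge_hwrm_ver_get() for a full
+ * example):
+ *
+ *	struct hwrm_ver_get_output *resp;
+ *	struct hwrm_ver_get_input *req;
+ *	int rc;
+ *
+ *	rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ *	if (rc)
+ *		return rc;
+ *	resp = bnge_hwrm_req_hold(bd, req);
+ *	rc = bnge_hwrm_req_send(bd, req);
+ *	if (!rc)
+ *		... read fields from resp ...
+ *	bnge_hwrm_req_drop(bd, req);
+ */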
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len);
+#define bnge_hwrm_req_init(bd, req, req_type) \
+ bnge_hwrm_req_create((bd), (void **)&(req), (req_type), \
+ sizeof(*(req)))
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags);
+void bnge_hwrm_req_timeout(struct bnge_dev *bd, void *req,
+ unsigned int timeout);
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req);
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t flags);
+void *bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma);
+#endif /* _BNGE_HWRM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
new file mode 100644
index 000000000000..5c178fade065
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd)
+{
+ u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
+ struct hwrm_ver_get_output *resp;
+ struct hwrm_ver_get_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
+
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
+ bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (hwrm_spec_code > hwrm_ver)
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+
+ bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ fw_maj, fw_min, fw_bld, fw_rsv);
+
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bd->fw_ver_str);
+
+ snprintf(bd->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
+ }
+
+ bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bd->hwrm_cmd_timeout)
+ bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bd->hwrm_cmd_max_timeout)
+ bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
+ else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
+ dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
+ bd->hwrm_cmd_max_timeout / 1000);
+
+ bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+
+ if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ bd->chip_num = le16_to_cpu(resp->chip_num);
+ bd->chip_rev = resp->chip_rev;
+
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;
+
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;
+
+hwrm_ver_get_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int
+bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp;
+ struct hwrm_nvm_get_dev_info_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ memcpy(nvm_info, resp, sizeof(*resp));
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_reset(struct bnge_dev *bd)
+{
+ struct hwrm_func_reset_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
+ if (rc)
+ return rc;
+
+ req->enables = 0;
+ bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
+{
+ struct hwrm_fw_set_time_input *req;
+ struct tm tm;
+ int rc;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
+ if (rc)
+ return rc;
+
+ req->year = cpu_to_le16(1900 + tm.tm_year);
+ req->month = 1 + tm.tm_mon;
+ req->day = tm.tm_mday;
+ req->hour = tm.tm_hour;
+ req->minute = tm.tm_min;
+ req->second = tm.tm_sec;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_rgtr_output *resp;
+ struct hwrm_func_drv_rgtr_input *req;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+
+ req->flags = cpu_to_le32(flags);
+ req->ver_maj_8b = DRV_VER_MAJ;
+ req->ver_min_8b = DRV_VER_MIN;
+ req->ver_upd_8b = DRV_VER_UPD;
+ req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req->ver_min = cpu_to_le16(DRV_VER_MIN);
+ req->ver_upd = cpu_to_le16(DRV_VER_UPD);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_unrgtr_input *req;
+ int rc;
+
+ if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
+ if (rc)
+ return rc;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define BNGE_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnge_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ if (bd->ctx)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bd->ctx = ctx;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+
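+	/* Firmware returns the next valid backing-store type in each
+	 * response, so follow that chain instead of probing every index.
+	 */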
+ for (type = 0; type < BNGE_CTX_V2_MAX; ) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags &
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnge_init_ctx_initializer(ctxm, init_val, init_off,
+ BNGE_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNGE_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);
+
+ctx_done:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
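+/* Encode the page size and PBL indirection depth (0, 1 or 2 levels) into
+ * pg_attr and point pg_dir at the page table (indirect) or the first
+ * data page (direct).
+ */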
+static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ if (!rmem->nr_pages)
+ return;
+
+ BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ bnge_hwrm_req_hold(bd, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnge_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
+ rc = bnge_hwrm_req_send(bd, req);
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
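+/* Refresh the reserved resource counts from HWRM_FUNC_QCFG.  If fewer
+ * completion rings were granted than RX + TX need, trim the RX/TX counts
+ * to fit before recording them.
+ */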
+static int bnge_hwrm_get_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ u16 cp, stats;
+ u16 rx, tx;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc) {
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ hw_resc->resv_irqs = cp;
+ rx = hw_resc->resv_rx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bnge_is_agg_reqd(bd))
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ rc = bnge_fix_rings_count(&rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
+
+get_rings_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static struct hwrm_func_cfg_input *
+__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ u32 enables = 0;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
+ return NULL;
+
+ req->fid = cpu_to_le16(0xffff);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
+ req->num_msix = cpu_to_le16(hwr->nq);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
+ req->enables = cpu_to_le32(enables);
+
+ return req;
+}
+
+static int
+bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
+ if (!req)
+ return -ENOMEM;
+
+ if (!req->enables) {
+ bnge_hwrm_req_drop(bd, req);
+ return 0;
+ }
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ return rc;
+
+ return bnge_hwrm_get_rings(bd);
+}
+
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ return bnge_hwrm_reserve_pf_rings(bd, hwr);
+}
+
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto func_qcfg_exit;
+
+ bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!bd->max_mtu)
+ bd->max_mtu = BNGE_MAX_MTU;
+
+ if (bd->db_size)
+ goto func_qcfg_exit;
+
+ bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
+ bd->db_size <= bd->db_offset)
+ bd->db_size = pci_resource_len(bd->pdev, 2);
+
+func_qcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_resource_qcaps_output *resp;
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_resource_qcaps_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send_silent(bd, req);
+ if (rc)
+ goto hwrm_func_resc_qcaps_exit;
+
+ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+
+hwrm_func_resc_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
+ struct bnge_pf_info *pf = &bd->pf;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V1;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V2;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+
+ bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
+
+hwrm_func_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_vnic_qcaps_output *resp;
+ struct hwrm_vnic_qcaps_input *req;
+ int rc;
+
+ bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
+ bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bd->max_tpa_v2)
+ bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+#define BNGE_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
+{
+ struct hwrm_queue_qportcfg_output *resp;
+ struct hwrm_queue_qportcfg_input *req;
+ u8 i, j, *qptr;
+ bool no_rdma;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bd->max_tc = resp->max_configurable_queues;
+ bd->max_lltc = resp->max_configurable_lossless_queues;
+ if (bd->max_tc > BNGE_MAX_QUEUE)
+ bd->max_tc = BNGE_MAX_QUEUE;
+
+ no_rdma = !bnge_is_roce_en(bd);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bd->max_tc; i++) {
+ bd->q_info[j].queue_id = *qptr;
+ bd->q_ids[i] = *qptr++;
+ bd->q_info[j].queue_profile = *qptr++;
+ bd->tc_to_qidx[j] = j;
+ if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
+ j++;
+ }
+ bd->max_q = bd->max_tc;
+ bd->max_tc = max_t(u8, j, 1);
+
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bd->max_tc = 1;
+
+ if (bd->max_lltc > bd->max_tc)
+ bd->max_lltc = bd->max_tc;
+
+qportcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
new file mode 100644
index 000000000000..6c03923eb559
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_LIB_H_
+#define _BNGE_HWRM_LIB_H_
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd);
+int bnge_hwrm_func_reset(struct bnge_dev *bd);
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd);
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last);
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr);
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+
+#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
new file mode 100644
index 000000000000..02254934f3d0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <linux/skbuff.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_ethtool.h"
+
+static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int bnge_open(struct net_device *dev)
+{
+ return 0;
+}
+
+static int bnge_close(struct net_device *dev)
+{
+ return 0;
+}
+
+static const struct net_device_ops bnge_netdev_ops = {
+ .ndo_open = bnge_open,
+ .ndo_stop = bnge_close,
+ .ndo_start_xmit = bnge_start_xmit,
+};
+
+static void bnge_init_mac_addr(struct bnge_dev *bd)
+{
+ eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
+}
+
+static void bnge_set_tpa_flags(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ bn->priv_flags &= ~BNGE_NET_EN_TPA;
+
+ if (bd->netdev->features & NETIF_F_LRO)
+ bn->priv_flags |= BNGE_NET_EN_LRO;
+ else if (bd->netdev->features & NETIF_F_GRO_HW)
+ bn->priv_flags |= BNGE_NET_EN_GRO;
+}
+
+static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
+}
+
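+/* Derive RX, aggregation, TX and completion ring page counts and index
+ * masks from the configured ring sizes, MTU and TPA/jumbo settings.
+ */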
+void bnge_set_ring_params(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
+ ring_size = bn->rx_ring_size;
+ bn->rx_agg_ring_size = 0;
+ bn->rx_agg_nr_pages = 0;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);
+
+ bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bn->priv_flags |= BNGE_NET_EN_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ if (agg_factor) {
+ if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
+ bn->rx_ring_size, ring_size);
+ bn->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
+ RX_DESC_CNT);
+ if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bn->rx_agg_ring_size = agg_ring_size;
+ bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+
+ rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bn->rx_buf_use_size = rx_size;
+ bn->rx_buf_size = rx_space;
+
+ bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
+ bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bn->tx_ring_size;
+ bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
+ bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ max_rx_cmpl = bn->rx_ring_size;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ max_rx_cmpl += bd->max_tpa_v2;
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
+ bn->cp_ring_size = ring_size;
+
+ bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
+ if (bn->cp_nr_pages > MAX_CP_PAGES) {
+ bn->cp_nr_pages = MAX_CP_PAGES;
+ bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
+ ring_size, bn->cp_ring_size);
+ }
+ bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
+ bn->cp_ring_mask = bn->cp_bit - 1;
+}
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
+{
+ struct net_device *netdev;
+ struct bnge_net *bn;
+ int rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
+ max_irqs);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, bd->dev);
+ bd->netdev = netdev;
+
+ netdev->netdev_ops = &bnge_netdev_ops;
+
+ bnge_set_ethtool_ops(netdev);
+
+ bn = netdev_priv(netdev);
+ bn->netdev = netdev;
+ bn->bd = bd;
+
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->max_mtu = bd->max_mtu;
+
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_PARTIAL;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
+
+ netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ if (bd->tso_max_segs)
+ netif_set_tso_max_segs(netdev, bd->tso_max_segs);
+
+ bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
+ bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+
+ bnge_set_tpa_flags(bd);
+ bnge_set_ring_params(bd);
+
+ bnge_init_l2_fltr_tbl(bn);
+ bnge_init_mac_addr(bd);
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
+ goto err_netdev;
+ }
+
+ return 0;
+
+err_netdev:
+ free_netdev(netdev);
+ return rc;
+}
+
+void bnge_netdev_free(struct bnge_dev *bd)
+{
+ struct net_device *netdev = bd->netdev;
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ bd->netdev = NULL;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
new file mode 100644
index 000000000000..a650d71a58db
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_NETDEV_H_
+#define _BNGE_NETDEV_H_
+
+#include <linux/bnxt/hsi.h>
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
+};
+
+struct bnge_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
+ u8 is_ts_pkt;
+ u8 is_push;
+ u8 action;
+ unsigned short nr_frags;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
+};
+
+struct bnge_sw_rx_bd {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+};
+
+struct bnge_sw_rx_agg_bd {
+ struct page *page;
+ unsigned int offset;
+ dma_addr_t mapping;
+};
+
+#define BNGE_RX_COPY_THRESH 256
+
+#define BNGE_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNGE_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
+enum {
+ BNGE_NET_EN_GRO = BIT(0),
+ BNGE_NET_EN_LRO = BIT(1),
+ BNGE_NET_EN_JUMBO = BIT(2),
+};
+
+#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+
+struct bnge_net {
+ struct bnge_dev *bd;
+ struct net_device *netdev;
+
+ u32 priv_flags;
+
+ u32 rx_ring_size;
+ u32 rx_buf_size;
+ u32 rx_buf_use_size; /* usable size */
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ u16 rx_nr_pages;
+ u16 rx_agg_nr_pages;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ u16 tx_nr_pages;
+
+ /* NQs and Completion rings */
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ u16 cp_nr_pages;
+
+#define BNGE_L2_FLTR_HASH_SIZE 32
+#define BNGE_L2_FLTR_HASH_MASK (BNGE_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+};
+
+#define BNGE_DEFAULT_RX_RING_SIZE 511
+#define BNGE_DEFAULT_TX_RING_SIZE 511
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs);
+void bnge_netdev_free(struct bnge_dev *bd);
+void bnge_set_ring_params(struct bnge_dev *bd);
+
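+/* Per-ring page-count limits; with 64K kernel pages
+ * (BNGE_PAGE_SHIFT == 16) far fewer pages are needed per ring.
+ */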
+#if (BNGE_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 16
+#else
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 128
+#endif
+
+#define BNGE_RX_PAGE_SIZE (1 << BNGE_RX_PAGE_SHIFT)
+
+#define RX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_cmp))
+#define SW_RXBD_RING_SIZE (sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+#define BNGE_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNGE_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
+#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
new file mode 100644
index 000000000000..c79a3607a1b7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_resc.h"
+
+static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
+{
+ u16 tcs = bd->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+
+ return tx / tcs;
+}
+
+static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
+}
+
+static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_stat_ctxs;
+}
+
+static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_cp_rings;
+}
+
+static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
+{
+ int roce_msix = BNGE_MAX_ROCE_MSIX;
+
+ return min_t(int, roce_msix, num_online_cpus() + 1);
+}
+
+static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_msix;
+
+ return 0;
+}
+
+static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_msix = num;
+}
+
+static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_stat_ctxs;
+
+ return 0;
+}
+
+static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_stat_ctxs = num_aux_ctx;
+}
+
+static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
+}
+
+static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
+{
+ int stat_ctx = 0;
+
+ if (bnge_is_roce_en(bd)) {
+ stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;
+
+ if (!bd->pf.port_id && bd->port_count > 1)
+ stat_ctx++;
+ }
+
+ return stat_ctx;
+}
+
+static u16 bnge_nqs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_msix(bd);
+}
+
+static u16 bnge_cprs_demand(struct bnge_dev *bd)
+{
+ return bd->tx_nr_rings + bd->rx_nr_rings;
+}
+
+static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
+{
+ u16 max_irq = bnge_get_max_func_irqs(bd);
+ u16 total_demand = bd->nq_nr_rings + num;
+
+ if (max_irq < total_demand) {
+ num = max_irq - bd->nq_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+
+ return num;
+}
+
+static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
+{
+ return tx_chunks * bd->num_tc;
+}
+
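+/* Fit *rx and *tx within max completion entries.  With a shared channel
+ * each count is simply capped at max; otherwise rx + tx must fit, and
+ * the larger of the two is decremented until it does.
+ */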
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
+{
+ u16 _rx = *rx, _tx = *tx;
+
+ if (shared) {
+ *rx = min_t(u16, _rx, max);
+ *tx = min_t(u16, _tx, max);
+ } else {
+ if (max < 2)
+ return -ENOMEM;
+ while (_rx + _tx > max) {
+ if (_rx > _tx && _rx > 1)
+ _rx--;
+ else if (_tx > 1)
+ _tx--;
+ }
+ *rx = _rx;
+ *tx = _tx;
+ }
+
+ return 0;
+}
+
+static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
+ u16 *tx, u16 max_nq, bool sh)
+{
+ u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);
+
+ if (tx_chunks != *tx) {
+		u16 tx_saved = tx_chunks;
+		int rc;
+
+ rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
+ if (rc)
+ return rc;
+ if (tx_chunks != tx_saved)
+ *tx = bnge_num_cp_to_tx(bd, tx_chunks);
+ return 0;
+ }
+
+ return bnge_fix_rings_count(rx, tx, max_nq, sh);
+}
+
+static int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+{
+ if (!rx_rings)
+ return 0;
+
+ return bnge_adjust_pow_two(rx_rings - 1,
+ BNGE_RSS_TABLE_ENTRIES);
+}
+
+static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ return bnge_cal_nr_rss_ctxs(hwr->grp);
+}
+
+static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
+{
+ return 1;
+}
+
+static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+{
+ return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
+ BNGE_RSS_TABLE_ENTRIES;
+}
+
+static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 max_entries, pad;
+ u32 *rss_indir_tbl;
+ int i;
+
+ max_entries = bnge_get_rxfh_indir_size(bd);
+ rss_indir_tbl = &bd->rss_indir_tbl[0];
+
+ for (i = 0; i < max_entries; i++)
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
+ bd->rx_nr_rings);
+
+ pad = bd->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
+}
+
+static void bnge_copy_reserved_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->nq = hw_resc->resv_irqs;
+ hwr->cmpl = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+}
+
+static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
+ hwr->stat && hwr->cmpl;
+}
+
+static bool bnge_need_reserve_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 cprs = bnge_cprs_demand(bd);
+ u16 rx = bd->rx_nr_rings, stat;
+ u16 nqs = bnge_nqs_demand(bd);
+ u16 vnic;
+
+ if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
+ return true;
+
+ vnic = bnge_get_total_vnics(bd, rx);
+
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ stat = bnge_func_stat_ctxs_demand(bd);
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
+ return true;
+ if (hw_resc->resv_irqs != nqs)
+ return true;
+
+ return false;
+}
+
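+/* Reserve rings with firmware.  Demand includes the auxiliary (RoCE)
+ * MSI-X and stat contexts; after the HWRM reservation the RX/TX/NQ
+ * counts are trimmed to what was actually granted, and the default RSS
+ * indirection table is refreshed if the RX reservation changed.
+ */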
+int bnge_reserve_rings(struct bnge_dev *bd)
+{
+ u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
+ struct bnge_hw_rings hwr = {0};
+ u16 rx_rings, old_rx_rings;
+ u16 nq = bd->nq_nr_rings;
+ u16 aux_msix = 0;
+ bool sh = false;
+ u16 tx_cp;
+ int rc;
+
+ if (!bnge_need_reserve_rings(bd))
+ return 0;
+
+ if (!bnge_aux_registered(bd)) {
+ aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
+ if (!aux_msix)
+ bnge_aux_set_stat_ctxs(bd, 0);
+
+ if (aux_msix > aux_dflt_msix)
+ aux_msix = aux_dflt_msix;
+ hwr.nq = nq + aux_msix;
+ } else {
+ hwr.nq = bnge_nqs_demand(bd);
+ }
+
+ hwr.tx = bd->tx_nr_rings;
+ hwr.rx = bd->rx_nr_rings;
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ sh = true;
+ hwr.cmpl = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);
+
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx <<= 1;
+ hwr.grp = bd->rx_nr_rings;
+ hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
+ hwr.stat = bnge_func_stat_ctxs_demand(bd);
+ old_rx_rings = bd->hw_resc.resv_rx_rings;
+
+ rc = bnge_hwrm_reserve_rings(bd, &hwr);
+ if (rc)
+ return rc;
+
+ bnge_copy_reserved_rings(bd, &hwr);
+
+ rx_rings = hwr.rx;
+ if (bnge_is_agg_reqd(bd)) {
+ if (hwr.rx >= 2)
+ rx_rings = hwr.rx >> 1;
+ else
+ return -ENOMEM;
+ }
+
+ rx_rings = min_t(u16, rx_rings, hwr.grp);
+ hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
+ if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
+ hwr.stat -= bnge_aux_get_stat_ctxs(bd);
+ hwr.nq = min_t(u16, hwr.nq, hwr.stat);
+
+ /* Adjust the rings */
+ rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx = rx_rings << 1;
+ tx_cp = hwr.tx;
+ hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bd->tx_nr_rings = hwr.tx;
+
+ if (rx_rings != bd->rx_nr_rings)
+		dev_warn(bd->dev, "RX ring reservation reduced to %d from the %d requested\n",
+ rx_rings, bd->rx_nr_rings);
+
+ bd->rx_nr_rings = rx_rings;
+ bd->nq_nr_rings = hwr.nq;
+
+ if (!bnge_rings_ok(&hwr))
+ return -ENOMEM;
+
+ if (old_rx_rings != bd->hw_resc.resv_rx_rings)
+ bnge_set_dflt_rss_indir_tbl(bd);
+
+ if (!bnge_aux_registered(bd)) {
+ u16 resv_msix, resv_ctx, aux_ctxs;
+ struct bnge_hw_resc *hw_resc;
+
+ hw_resc = &bd->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
+ aux_msix = min_t(u16, resv_msix, aux_msix);
+ bnge_aux_set_msix_num(bd, aux_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
+ aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
+ bnge_aux_set_stat_ctxs(bd, aux_ctxs);
+ }
+
+ return rc;
+}
+
+int bnge_alloc_irqs(struct bnge_dev *bd)
+{
+ u16 aux_msix, tx_cp, num_entries;
+ int i, irqs_demand, rc;
+ u16 max, min = 1;
+
+ irqs_demand = bnge_nqs_demand(bd);
+ max = bnge_get_max_func_irqs(bd);
+ if (irqs_demand > max)
+ irqs_demand = max;
+
+ if (!(bd->flags & BNGE_EN_SHARED_CHNL))
+ min = 2;
+
+ irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
+ PCI_IRQ_MSIX);
+ aux_msix = bnge_aux_get_msix(bd);
+ if (irqs_demand < 0 || irqs_demand < aux_msix) {
+ rc = -ENODEV;
+ goto err_free_irqs;
+ }
+
+ num_entries = irqs_demand;
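+ /* With dynamic MSI-X allocation, size the IRQ table for the maximum
+ * so that more vectors can be added later.
+ */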
+ if (pci_msix_can_alloc_dyn(bd->pdev))
+ num_entries = max;
+ bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
+ if (!bd->irq_tbl) {
+ rc = -ENOMEM;
+ goto err_free_irqs;
+ }
+
+ for (i = 0; i < irqs_demand; i++)
+ bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);
+
+ bd->irqs_acquired = irqs_demand;
+ /* Reduce rings based upon the number of vectors allocated.
+ * We don't need to consider NQs as they have already been calculated
+ * and must be more than irqs_demand.
+ */
+ rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
+ &bd->tx_nr_rings,
+ irqs_demand - aux_msix, min == 1);
+ if (rc)
+ goto err_free_irqs;
+
+ tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
+ bd->nq_nr_rings = (min == 1) ?
+ max_t(u16, tx_cp, bd->rx_nr_rings) :
+ tx_cp + bd->rx_nr_rings;
+
+ /* Readjust tx_nr_rings_per_tc */
+ if (!bd->num_tc)
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+
+ return 0;
+
+err_free_irqs:
+ dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc);
+ bnge_free_irqs(bd);
+ return rc;
+}
+
+void bnge_free_irqs(struct bnge_dev *bd)
+{
+ pci_free_irq_vectors(bd->pdev);
+ kfree(bd->irq_tbl);
+ bd->irq_tbl = NULL;
+}
+
+static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, u16 *max_nq)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 max_ring_grps = 0, max_cp;
+ int rc;
+
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_nq = min_t(int, bnge_get_max_func_irqs(bd),
+ hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (bnge_is_agg_reqd(bd))
+ *max_rx >>= 1;
+
+ max_cp = bnge_get_max_func_cp_rings(bd);
+
+ /* Fix RX and TX rings according to number of CPs available */
+ rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
+
+ *max_rx = min_t(int, *max_rx, max_ring_grps);
+}
+
+static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, bool shared)
+{
+ u16 rx, tx, nq;
+
+ _bnge_get_max_rings(bd, &rx, &tx, &nq);
+ *max_rx = rx;
+ *max_tx = tx;
+ if (!rx || !tx || !nq)
+ return -ENOMEM;
+
+ return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
+}
+
+static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
+ if (rc) {
+ dev_info(bd->dev, "Not enough rings available\n");
+ return rc;
+ }
+
+ if (bnge_is_roce_en(bd)) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnge_get_max_func_cp_rings(bd);
+ max_stat = bnge_get_max_func_stat_ctxs(bd);
+ max_irq = bnge_get_max_func_irqs(bd);
+ if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNGE_MIN_ROCE_CP_RINGS;
+ max_irq -= BNGE_MIN_ROCE_CP_RINGS;
+ max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(u16, max_cp, max_irq);
+ max_cp = min_t(u16, max_cp, max_stat);
+ rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* In the initial default shared ring setting, each shared ring must have an
+ * RX/TX ring pair.
+ */
+static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
+{
+ bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
+ bd->rx_nr_rings = bd->nq_nr_rings;
+ bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+}
+
+static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
+{
+ u16 dflt_rings, max_rx_rings, max_tx_rings;
+ int rc;
+
+ if (sh)
+ bd->flags |= BNGE_EN_SHARED_CHNL;
+
+ dflt_rings = netif_get_num_default_rss_queues();
+
+ rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
+ if (rc)
+ return rc;
+ bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
+ bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+ else
+ bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Unable to reserve tx rings\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+
+ /* Rings may have been reduced, so reserve them again */
+ if (bnge_need_reserve_rings(bd)) {
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Reservation with fewer rings failed\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ }
+ if (rc) {
+ bd->tx_nr_rings = 0;
+ bd->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
+static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 entries;
+
+ entries = BNGE_MAX_RSS_TABLE_ENTRIES;
+
+ bd->rss_indir_tbl_entries = entries;
+ bd->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
+ if (!bd->rss_indir_tbl)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int bnge_net_init_dflt_config(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc;
+ int rc;
+
+ rc = bnge_alloc_rss_indir_tbl(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_net_init_dflt_rings(bd, true);
+ if (rc)
+ goto err_free_tbl;
+
+ hw_resc = &bd->hw_resc;
+ bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNGE_L2_FLTR_MAX_FLTR;
+
+ return 0;
+
+err_free_tbl:
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+ return rc;
+}
+
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
+{
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+}
+
+void bnge_aux_init_dflt_config(struct bnge_dev *bd)
+{
+ bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
+ bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
new file mode 100644
index 000000000000..54ef1c7d8822
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RESC_H_
+#define _BNGE_RESC_H_
+
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+
+struct bnge_hw_resc {
+ u16 min_rsscos_ctxs;
+ u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
+ u16 min_cp_rings;
+ u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 max_tx_sch_inputs;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
+ u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
+ u16 max_l2_ctxs;
+ u16 min_vnics;
+ u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
+ u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
+ u16 max_irqs;
+ u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+};
+
+struct bnge_hw_rings {
+ u16 tx;
+ u16 rx;
+ u16 grp;
+ u16 nq;
+ u16 cmpl;
+ u16 stat;
+ u16 vnic;
+ u16 rss_ctx;
+};
+
+/* "TXRX", 2 hypens, plus maximum integer */
+#define BNGE_IRQ_NAME_EXTRA 17
+struct bnge_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested:1;
+ u8 have_cpumask:1;
+ char name[IFNAMSIZ + BNGE_IRQ_NAME_EXTRA];
+ cpumask_var_t cpu_mask;
+};
+
+int bnge_reserve_rings(struct bnge_dev *bd);
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared);
+int bnge_alloc_irqs(struct bnge_dev *bd);
+void bnge_free_irqs(struct bnge_dev *bd);
+int bnge_net_init_dflt_config(struct bnge_dev *bd);
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
+void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+
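+/* Adjust the block count for @total_ent entries up to a power of two. */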
+static inline u32
+bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
+{
+ u32 blks = total_ent / ent_per_blk;
+
+ if (blks == 0 || blks == 1)
+ return ++blks;
+
+ if (!is_power_of_2(blks))
+ blks = roundup_pow_of_two(blks);
+
+ return blks;
+}
+
+#define BNGE_MAX_ROCE_MSIX 64
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#endif /* _BNGE_RESC_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
new file mode 100644
index 000000000000..52ada65943a0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+
+static void bnge_init_ctx_mem(struct bnge_ctx_mem_type *ctxm,
+ void *p, int len)
+{
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNGE_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
+}
+
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ int i;
+
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
+
+ rmem->pg_arr[i] = NULL;
+ }
+skip_pages:
+ if (rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
+ rmem->pg_tbl, rmem->dma_pg_tbl);
+ rmem->pg_tbl = NULL;
+ }
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
+ }
+}
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ u64 valid_bit = 0;
+ int i;
+
+ if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
+ &rmem->dma_pg_tbl,
+ GFP_KERNEL);
+ if (!rmem->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
+ GFP_KERNEL);
+ if (!rmem->pg_arr[i])
+ return -ENOMEM;
+
+ if (rmem->ctx_mem)
+ bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
+ rmem->page_size);
+
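+ /* Multi-page or indirect rings need a page table entry; ring PTEs
+ * also flag the last two pages for the hardware.
+ */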
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
+ }
+
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ rmem->page_size = BNGE_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNGE_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNGE_RMEM_USE_FULL_PAGE_FLAG;
+ return bnge_alloc_ring(bd, rmem);
+}
+
+static int bnge_alloc_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth, struct bnge_ctx_mem_type *ctxm)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return -EINVAL;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
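+ /* Large contexts need two levels of page tables: a root table
+ * pointing to sub-tables of up to MAX_CTX_PAGES pages each.
+ */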
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->dma_pg_tbl = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ rmem->ctx_mem = ctxm;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
+ rc = bnge_alloc_ctx_one_lvl(bd, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rmem->ctx_mem = ctxm;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ }
+
+ return rc;
+}
+
+static void bnge_free_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+ struct bnge_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnge_free_ring(bd, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnge_free_ring(bd, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
+static int bnge_setup_ctxm_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnge_alloc_ctx_pg_tbls(bd, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+
+ return rc;
+}
+
+static int bnge_backing_store_cfg(struct bnge_dev *bd, u32 ena)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ struct bnge_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNGE_CTX_MAX - 1;
+ else
+ last_type = BNGE_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
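+ /* Configure every context type; the 'last' flag marks the final
+ * request of the sequence to the firmware.
+ */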
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnge_hwrm_func_backing_store(bd, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnge_free_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnge_free_ctx_pg_tbls(bd, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
+ }
+
+ ctx->flags &= ~BNGE_CTX_FLAG_INITED;
+ kfree(ctx);
+ bd->ctx = NULL;
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+int bnge_alloc_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_type *ctxm;
+ struct bnge_ctx_mem_info *ctx;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
+ u32 num_mr, num_ah;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
+ u8 pg_lvl = 1;
+ int i, rc;
+
+ rc = bnge_hwrm_func_backing_store_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed querying ctx mem caps, rc: %d\n", rc);
+ return rc;
+ }
+
+ ctx = bd->ctx;
+ if (!ctx || (ctx->flags & BNGE_CTX_FLAG_INITED))
+ return 0;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
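+ /* With RoCE enabled, carve out extra QP and SRQ contexts; this is
+ * skipped under kdump to limit memory use.
+ */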
+ if (bnge_is_roce_en(bd) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra QPs if the fast QP destroy feature is enabled */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
+ }
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_CQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_VNIC];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STAT];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ if (!bnge_is_roce_en(bd))
+ goto skip_rdma;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV];
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_TIM];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
+
+skip_rdma:
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNGE_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+
+ rc = bnge_backing_store_cfg(bd, ena);
+ if (rc) {
+ dev_err(bd->dev, "Failed configuring ctx mem, rc: %d\n", rc);
+ return rc;
+ }
+ ctx->flags |= BNGE_CTX_FLAG_INITED;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
new file mode 100644
index 000000000000..300f1d8268ef
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RMEM_H_
+#define _BNGE_RMEM_H_
+
+struct bnge_ctx_mem_type;
+struct bnge_dev;
+
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+struct bnge_ring_mem_info {
+ /* Number of pages to next level */
+ int nr_pages;
+ int page_size;
+ u16 flags;
+#define BNGE_RMEM_VALID_PTE_FLAG 1
+#define BNGE_RMEM_RING_PTE_FLAG 2
+#define BNGE_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
+
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t dma_pg_tbl;
+
+ int vmem_size;
+ void **vmem;
+
+ struct bnge_ctx_mem_type *ctx_mem;
+};
+
+/* The hardware supports certain page sizes.
+ * Use the supported page sizes to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNGE_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNGE_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNGE_PAGE_SHIFT 13
+#else
+#define BNGE_PAGE_SHIFT 16
+#endif
+#define BNGE_PAGE_SIZE (1 << BNGE_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNGE_RX_PAGE_SHIFT 15
+#else
+#define BNGE_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
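+/* Each context page table page holds BNGE_PAGE_SIZE / 8 64-bit entries. */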
+#define MAX_CTX_PAGES (BNGE_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+
+struct bnge_ctx_pg_info {
+ u32 entries;
+ u32 nr_pages;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnge_ring_mem_info ring_mem;
+ struct bnge_ctx_pg_info **ctx_pg_tbl;
+};
+
+#define BNGE_MAX_TQM_SP_RINGS 1
+#define BNGE_MAX_TQM_FP_RINGS 8
+#define BNGE_MAX_TQM_RINGS \
+ (BNGE_MAX_TQM_SP_RINGS + BNGE_MAX_TQM_FP_RINGS)
+#define BNGE_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNGE_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNGE_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNGE_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
+#define BNGE_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNGE_CTX_QP \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNGE_CTX_SRQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNGE_CTX_CQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNGE_CTX_VNIC \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNGE_CTX_STAT \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNGE_CTX_STQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNGE_CTX_FTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNGE_CTX_MRAV \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNGE_CTX_TIM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNGE_CTX_TCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNGE_CTX_RCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
+#define BNGE_CTX_MTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNGE_CTX_SQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNGE_CTX_RQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNGE_CTX_SRQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNGE_CTX_CQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNGE_CTX_SRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNGE_CTX_SRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNGE_CTX_CRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNGE_CTX_CRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNGE_CTX_RIGP0_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNGE_CTX_L2_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNGE_CTX_ROCE_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+
+#define BNGE_CTX_MAX (BNGE_CTX_TIM + 1)
+#define BNGE_CTX_L2_MAX (BNGE_CTX_FTQM + 1)
+#define BNGE_CTX_INV ((u16)-1)
+
+#define BNGE_CTX_V2_MAX \
+ (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1)
+
+#define BNGE_BS_CFG_ALL_DONE \
+ FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE
+
+struct bnge_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNGE_CTX_MEM_TYPE_VALID \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNGE_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNGE_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNGE_MAX_SPLIT_ENTRY];
+ };
+ struct bnge_ctx_pg_info *pg_info;
+};
+
+struct bnge_ctx_mem_info {
+ u8 tqm_fp_rings_count;
+ u32 flags;
+#define BNGE_CTX_FLAG_INITED 0x01
+ struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
+};
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+int bnge_alloc_ctx_mem(struct bnge_dev *bd);
+void bnge_free_ctx_mem(struct bnge_dev *bd);
+
+#endif /* _BNGE_RMEM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 17ae6df90723..9af81630c8a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -344,7 +344,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
}
}
-/* maps unmapped priorities to to the same COS as L2 */
+/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 44199855ebfb..fc8dec37a9e4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1243,9 +1243,9 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* pf B succeeds in taking the same lock since they are from the same port.
* pf A takes the per pf misc lock. Performs eeprom access.
* pf A finishes. Unlocks the per pf misc lock.
- * Pf B takes the lock and proceeds to perform it's own access.
+ * Pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
- * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
* access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
@@ -3318,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct bnx2x *bp = netdev_priv(dev);
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -3361,20 +3364,21 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
- case ETHTOOL_GRXFH:
- return bnx2x_get_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
}
}
-static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct bnx2x *bp = netdev_priv(dev);
int udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -3460,19 +3464,6 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
}
}
-static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return bnx2x_set_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
return T_ETH_INDIRECTION_TABLE_SIZE;
@@ -3684,10 +3675,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3711,10 +3703,11 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_link_ksettings = bnx2x_get_vf_link_ksettings,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index a84d015da5df..9221942290a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -332,7 +332,7 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* microcode fixed page page size 4K (chains and ring segments) */
+/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9a1a1d504c0..f0f05d7315ac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1768,7 +1768,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines are supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
@@ -10219,8 +10219,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
.sync_table = bnx2x_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -15176,7 +15175,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
}
/* Read the PHC */
-static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(struct cyclecounter *cc)
{
struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
int port = BP_PORT(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index bacc8552bce1..00ca861c80dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -379,7 +379,7 @@ struct bnx2x_vlan_mac_obj {
/**
* Delete all configured elements having the given
* vlan_mac_flags specification. Assumes no pending for
- * execution commands. Will schedule all all currently
+ * execution commands. Will schedule all currently
* configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
* specification for deletion and will use the given
* ramrod_flags for the last DEL operation.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2cb3185c442c..5578ddcb465d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -58,8 +58,8 @@
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/pci-tph.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -708,8 +709,7 @@ normal_tx:
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
@@ -721,7 +721,8 @@ normal_tx:
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -778,9 +779,11 @@ tx_dma_error:
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
@@ -809,6 +812,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
int tx_pkts = 0;
bool rc = false;
@@ -848,13 +852,14 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
@@ -1810,7 +1815,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
- /* if vf-rep dev is NULL, the must belongs to the PF */
+ /* if vf-rep dev is NULL, it must belong to the PF */
return dev ? dev : bp->dev;
}
@@ -2989,6 +2994,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 cons;
int rx_pkts = 0;
u8 event = 0;
@@ -3042,6 +3048,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -3066,7 +3074,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT) {
+ if (flush_xdp) {
xdp_do_flush();
event &= ~BNXT_REDIRECT_EVENT;
}
@@ -3422,9 +3430,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
dev_kfree_skb(skb);
}
@@ -3800,12 +3810,14 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int numa_node)
{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
struct page_pool *pool;
- pp.pool_size = bp->rx_agg_ring_size;
+ pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
if (BNXT_RX_PAGE_MODE(bp))
- pp.pool_size += bp->rx_ring_size;
+ pp.pool_size += bp->rx_ring_size / rx_size_fac;
pp.nid = numa_node;
pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
@@ -3823,7 +3835,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
rxr->need_head_pool = page_pool_is_unreadable(pool);
if (bnxt_separate_head_pool(rxr)) {
- pp.pool_size = max(bp->rx_ring_size, 1024);
+ pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
@@ -7116,7 +7128,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
ring_type);
- return -1;
+ return -EINVAL;
}
resp = hwrm_req_hold(bp, req);
@@ -11604,11 +11616,9 @@ static void bnxt_free_irq(struct bnxt *bp)
static int bnxt_request_irq(struct bnxt *bp)
{
+ struct cpu_rmap *rmap = NULL;
int i, j, rc = 0;
unsigned long flags = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
rc = bnxt_setup_int_mode(bp);
if (rc) {
@@ -11629,15 +11639,15 @@ static int bnxt_request_irq(struct bnxt *bp)
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
-#ifdef CONFIG_RFS_ACCEL
- if (rmap && bp->bnapi[i]->rx_ring) {
+ if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
+ rmap && bp->bnapi[i]->rx_ring) {
rc = irq_cpu_rmap_add(rmap, irq->vector);
if (rc)
netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
j);
j++;
}
-#endif
+
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
bp->bnapi[i]);
if (rc)
@@ -14121,28 +14131,13 @@ static void bnxt_unlock_sp(struct bnxt *bp)
netdev_unlock(bp->dev);
}
-/* Same as bnxt_lock_sp() with additional rtnl_lock */
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
-{
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
- netdev_lock(bp->dev);
-}
-
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
-{
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- netdev_unlock(bp->dev);
- rtnl_unlock();
-}
-
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -14150,9 +14145,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -14191,7 +14186,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -15083,17 +15078,15 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
- netdev_lock(bp->dev);
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
netdev_unlock(bp->dev);
- rtnl_unlock();
goto ulp_start;
}
@@ -15113,7 +15106,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
netdev_unlock(bp->dev);
- rtnl_unlock();
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
netdev_lock(bp->dev);
@@ -15639,8 +15631,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -15648,8 +15639,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
}, bnxt_udp_tunnels_p7 = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -16059,7 +16049,7 @@ err_reset:
rc);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- netif_close(dev);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -16774,6 +16764,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
dev->request_ops_lock = true;
+ dev->netmem_tx = true;
rc = register_netdev(dev);
if (rc)
@@ -16875,7 +16866,6 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
@@ -16920,7 +16910,6 @@ static int bnxt_resume(struct device *device)
resume_exit:
netdev_unlock(bp->dev);
- rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16985,7 +16974,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_free_ctx_mem(bp, false);
netdev_unlock(netdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -17086,7 +17075,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
@@ -17104,7 +17092,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
netdev_unlock(netdev);
- rtnl_unlock();
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index ce97befd3cb3..18d6c94d5cb8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -10,7 +10,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
@@ -368,23 +368,27 @@ static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
if (!ctxm->mem_valid || !seg_id)
continue;
- if (trace)
+ if (trace) {
extra_hlen = BNXT_SEG_RCD_LEN;
+ if (buf) {
+ u16 trace_type = bnxt_bstore_to_trace[type];
+
+ bnxt_fill_drv_seg_record(bp, &record, ctxm,
+ trace_type);
+ }
+ }
+
if (buf)
data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
+
seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
0, 0, 0, comp_id, seg_id);
memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
buf += BNXT_SEG_HDR_LEN;
- if (trace) {
- u16 trace_type = bnxt_bstore_to_trace[type];
-
- bnxt_fill_drv_seg_record(bp, &record, ctxm,
- trace_type);
+ if (trace)
memcpy(buf, &record, BNXT_SEG_RCD_LEN);
- }
buf += seg_len;
}
len += BNXT_SEG_HDR_LEN + seg_len;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 0dbb880a7aa0..a00b67334f9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
@@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
return -EINVAL;
+ }
+ for (i = 0; i < max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 127b7015f676..3324afbb3bec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -10,7 +10,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include <linux/dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
index d0bb4887acd0..a0a8d687dd99 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -7,7 +7,7 @@
* the Free Software Foundation.
*/
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 777880594a04..4c4581b0342e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -12,7 +12,7 @@
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/netdev_lock.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 6f6576dc417a..53a3bcb0efe0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -8,7 +8,7 @@
*/
#include <linux/dim.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f5d490bf997e..1b37612b1c01 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -26,7 +26,7 @@
#include <linux/timecounter.h>
#include <net/netdev_queues.h>
#include <net/netlink.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -1587,8 +1587,11 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
return 0;
}
-static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct bnxt *bp = netdev_priv(dev);
+
cmd->data = 0;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1647,10 +1650,15 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
-static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- u32 rss_hash_cfg = bp->rss_hash_cfg;
+ struct bnxt *bp = netdev_priv(dev);
int tuple, rc = 0;
+ u32 rss_hash_cfg;
+
+ rss_hash_cfg = bp->rss_hash_cfg;
if (cmd->data == RXH_4TUPLE)
tuple = 4;
@@ -1768,10 +1776,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
rc = bnxt_grxclsrule(bp, cmd);
break;
- case ETHTOOL_GRXFH:
- rc = bnxt_grxfh(bp, cmd);
- break;
-
default:
rc = -EOPNOTSUPP;
break;
@@ -1786,10 +1790,6 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int rc;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- rc = bnxt_srxfh(bp, cmd);
- break;
-
case ETHTOOL_SRXCLSRLINS:
rc = bnxt_srxclsrlins(bp, cmd);
break;
@@ -5521,6 +5521,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .get_rxfh_fields = bnxt_get_rxfh_fields,
+ .set_rxfh_fields = bnxt_set_rxfh_fields,
.create_rxfh_context = bnxt_create_rxfh_context,
.modify_rxfh_context = bnxt_modify_rxfh_context,
.remove_rxfh_context = bnxt_remove_rxfh_context,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
deleted file mode 100644
index 549231703bce..000000000000
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ /dev/null
@@ -1,10914 +0,0 @@
-/* Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2025 Broadcom Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * DO NOT MODIFY!!! This file is automatically generated.
- */
-
-#ifndef _BNXT_HSI_H_
-#define _BNXT_HSI_H_
-
-/* hwrm_cmd_hdr (size:128b/16B) */
-struct hwrm_cmd_hdr {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_resp_hdr (size:64b/8B) */
-struct hwrm_resp_hdr {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-#define CMD_DISCR_TLV_ENCAP 0x8000UL
-#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
-
-
-#define TLV_TYPE_HWRM_REQUEST 0x1UL
-#define TLV_TYPE_HWRM_RESPONSE 0x2UL
-#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
-#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
-#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
-#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
-#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
-#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
-
-
-/* tlv (size:64b/8B) */
-struct tlv {
- __le16 cmd_discr;
- u8 reserved_8b;
- u8 flags;
- #define TLV_FLAGS_MORE 0x1UL
- #define TLV_FLAGS_MORE_LAST 0x0UL
- #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
- #define TLV_FLAGS_REQUIRED 0x2UL
- #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
- #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
- #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
- __le16 tlv_type;
- __le16 length;
-};
-
-/* input (size:128b/16B) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* output (size:64b/8B) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* hwrm_short_input (size:128b/16B) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
- __le16 target_id;
- #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
- #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
- #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
- __le16 size;
- __le64 req_addr;
-};
-
-/* cmd_nums (size:64b/8B) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET 0x0UL
- #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
- #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
- #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
- #define HWRM_FUNC_BUF_UNRGTR 0xeUL
- #define HWRM_FUNC_VF_CFG 0xfUL
- #define HWRM_RESERVED1 0x10UL
- #define HWRM_FUNC_RESET 0x11UL
- #define HWRM_FUNC_GETFID 0x12UL
- #define HWRM_FUNC_VF_ALLOC 0x13UL
- #define HWRM_FUNC_VF_FREE 0x14UL
- #define HWRM_FUNC_QCAPS 0x15UL
- #define HWRM_FUNC_QCFG 0x16UL
- #define HWRM_FUNC_CFG 0x17UL
- #define HWRM_FUNC_QSTATS 0x18UL
- #define HWRM_FUNC_CLR_STATS 0x19UL
- #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
- #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
- #define HWRM_FUNC_DRV_RGTR 0x1dUL
- #define HWRM_FUNC_DRV_QVER 0x1eUL
- #define HWRM_FUNC_BUF_RGTR 0x1fUL
- #define HWRM_PORT_PHY_CFG 0x20UL
- #define HWRM_PORT_MAC_CFG 0x21UL
- #define HWRM_PORT_TS_QUERY 0x22UL
- #define HWRM_PORT_QSTATS 0x23UL
- #define HWRM_PORT_LPBK_QSTATS 0x24UL
- #define HWRM_PORT_CLR_STATS 0x25UL
- #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
- #define HWRM_PORT_PHY_QCFG 0x27UL
- #define HWRM_PORT_MAC_QCFG 0x28UL
- #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
- #define HWRM_PORT_PHY_QCAPS 0x2aUL
- #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
- #define HWRM_PORT_PHY_I2C_READ 0x2cUL
- #define HWRM_PORT_LED_CFG 0x2dUL
- #define HWRM_PORT_LED_QCFG 0x2eUL
- #define HWRM_PORT_LED_QCAPS 0x2fUL
- #define HWRM_QUEUE_QPORTCFG 0x30UL
- #define HWRM_QUEUE_QCFG 0x31UL
- #define HWRM_QUEUE_CFG 0x32UL
- #define HWRM_FUNC_VLAN_CFG 0x33UL
- #define HWRM_FUNC_VLAN_QCFG 0x34UL
- #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
- #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
- #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
- #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
- #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
- #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
- #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
- #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
- #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
- #define HWRM_VNIC_ALLOC 0x40UL
- #define HWRM_VNIC_FREE 0x41UL
- #define HWRM_VNIC_CFG 0x42UL
- #define HWRM_VNIC_QCFG 0x43UL
- #define HWRM_VNIC_TPA_CFG 0x44UL
- #define HWRM_VNIC_TPA_QCFG 0x45UL
- #define HWRM_VNIC_RSS_CFG 0x46UL
- #define HWRM_VNIC_RSS_QCFG 0x47UL
- #define HWRM_VNIC_PLCMODES_CFG 0x48UL
- #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
- #define HWRM_VNIC_QCAPS 0x4aUL
- #define HWRM_VNIC_UPDATE 0x4bUL
- #define HWRM_RING_ALLOC 0x50UL
- #define HWRM_RING_FREE 0x51UL
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
- #define HWRM_RING_AGGINT_QCAPS 0x54UL
- #define HWRM_RING_SCHQ_ALLOC 0x55UL
- #define HWRM_RING_SCHQ_CFG 0x56UL
- #define HWRM_RING_SCHQ_FREE 0x57UL
- #define HWRM_RING_RESET 0x5eUL
- #define HWRM_RING_GRP_ALLOC 0x60UL
- #define HWRM_RING_GRP_FREE 0x61UL
- #define HWRM_RING_CFG 0x62UL
- #define HWRM_RING_QCFG 0x63UL
- #define HWRM_RESERVED5 0x64UL
- #define HWRM_RESERVED6 0x65UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
- #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
- #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
- #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
- #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
- #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
- #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
- #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
- #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
- #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
- #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
- #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
- #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
- #define HWRM_QUEUE_QCAPS 0x8cUL
- #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
- #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
- #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
- #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
- #define HWRM_CFA_L2_FILTER_FREE 0x91UL
- #define HWRM_CFA_L2_FILTER_CFG 0x92UL
- #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
- #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
- #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
- #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
- #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
- #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
- #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
- #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
- #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
- #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
- #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
- #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
- #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
- #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
- #define HWRM_STAT_CTX_ALLOC 0xb0UL
- #define HWRM_STAT_CTX_FREE 0xb1UL
- #define HWRM_STAT_CTX_QUERY 0xb2UL
- #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
- #define HWRM_PORT_QSTATS_EXT 0xb4UL
- #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
- #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
- #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
- #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
- #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_RESERVED7 0xbaUL
- #define HWRM_PORT_TX_FIR_CFG 0xbbUL
- #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
- #define HWRM_PORT_ECN_QSTATS 0xbdUL
- #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
- #define HWRM_FW_LIVEPATCH 0xbfUL
- #define HWRM_FW_RESET 0xc0UL
- #define HWRM_FW_QSTATUS 0xc1UL
- #define HWRM_FW_HEALTH_CHECK 0xc2UL
- #define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_QCAPS 0xc4UL
- #define HWRM_FW_STATE_QUIESCE 0xc5UL
- #define HWRM_FW_STATE_BACKUP 0xc6UL
- #define HWRM_FW_STATE_RESTORE 0xc7UL
- #define HWRM_FW_SET_TIME 0xc8UL
- #define HWRM_FW_GET_TIME 0xc9UL
- #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
- #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
- #define HWRM_FW_IPC_MAILBOX 0xccUL
- #define HWRM_FW_ECN_CFG 0xcdUL
- #define HWRM_FW_ECN_QCFG 0xceUL
- #define HWRM_FW_SECURE_CFG 0xcfUL
- #define HWRM_EXEC_FWD_RESP 0xd0UL
- #define HWRM_REJECT_FWD_RESP 0xd1UL
- #define HWRM_FWD_RESP 0xd2UL
- #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
- #define HWRM_OEM_CMD 0xd4UL
- #define HWRM_PORT_PRBS_TEST 0xd5UL
- #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
- #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
- #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
- #define HWRM_PORT_DSC_DUMP 0xd9UL
- #define HWRM_PORT_EP_TX_QCFG 0xdaUL
- #define HWRM_PORT_EP_TX_CFG 0xdbUL
- #define HWRM_PORT_CFG 0xdcUL
- #define HWRM_PORT_QCFG 0xddUL
- #define HWRM_PORT_MAC_QCAPS 0xdfUL
- #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
- #define HWRM_REG_POWER_QUERY 0xe1UL
- #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
- #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
- #define HWRM_WOL_FILTER_ALLOC 0xf0UL
- #define HWRM_WOL_FILTER_FREE 0xf1UL
- #define HWRM_WOL_FILTER_QCFG 0xf2UL
- #define HWRM_WOL_REASON_QCFG 0xf3UL
- #define HWRM_CFA_METER_QCAPS 0xf4UL
- #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
- #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
- #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
- #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
- #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
- #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
- #define HWRM_CFA_VFR_ALLOC 0xfdUL
- #define HWRM_CFA_VFR_FREE 0xfeUL
- #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
- #define HWRM_CFA_VF_PAIR_FREE 0x101UL
- #define HWRM_CFA_VF_PAIR_INFO 0x102UL
- #define HWRM_CFA_FLOW_ALLOC 0x103UL
- #define HWRM_CFA_FLOW_FREE 0x104UL
- #define HWRM_CFA_FLOW_FLUSH 0x105UL
- #define HWRM_CFA_FLOW_STATS 0x106UL
- #define HWRM_CFA_FLOW_INFO 0x107UL
- #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
- #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
- #define HWRM_CFA_PAIR_ALLOC 0x10dUL
- #define HWRM_CFA_PAIR_FREE 0x10eUL
- #define HWRM_CFA_PAIR_INFO 0x10fUL
- #define HWRM_FW_IPC_MSG 0x110UL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
- #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
- #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
- #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
- #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
- #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
- #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
- #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
- #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
- #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
- #define HWRM_CFA_COUNTER_CFG 0x11cUL
- #define HWRM_CFA_COUNTER_QCFG 0x11dUL
- #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
- #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
- #define HWRM_CFA_EEM_QCAPS 0x120UL
- #define HWRM_CFA_EEM_CFG 0x121UL
- #define HWRM_CFA_EEM_QCFG 0x122UL
- #define HWRM_CFA_EEM_OP 0x123UL
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
- #define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
- #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
- #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
- #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
- #define HWRM_ENGINE_CKV_STATUS 0x12eUL
- #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
- #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
- #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
- #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
- #define HWRM_ENGINE_CKV_FLUSH 0x133UL
- #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
- #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
- #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
- #define HWRM_ENGINE_QG_QUERY 0x13dUL
- #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
- #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
- #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
- #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
- #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
- #define HWRM_ENGINE_QG_METER_BIND 0x143UL
- #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
- #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
- #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
- #define HWRM_ENGINE_SG_QUERY 0x147UL
- #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
- #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
- #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
- #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
- #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
- #define HWRM_ENGINE_STATS_CONFIG 0x155UL
- #define HWRM_ENGINE_STATS_CLEAR 0x156UL
- #define HWRM_ENGINE_STATS_QUERY 0x157UL
- #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
- #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
- #define HWRM_ENGINE_RQ_FREE 0x15fUL
- #define HWRM_ENGINE_CQ_ALLOC 0x160UL
- #define HWRM_ENGINE_CQ_FREE 0x161UL
- #define HWRM_ENGINE_NQ_ALLOC 0x162UL
- #define HWRM_ENGINE_NQ_FREE 0x163UL
- #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
- #define HWRM_ENGINE_FUNC_QCFG 0x165UL
- #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
- #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
- #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
- #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
- #define HWRM_FUNC_VF_BW_CFG 0x195UL
- #define HWRM_FUNC_VF_BW_QCFG 0x196UL
- #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
- #define HWRM_FUNC_QSTATS_EXT 0x198UL
- #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
- #define HWRM_FUNC_SPD_CFG 0x19aUL
- #define HWRM_FUNC_SPD_QCFG 0x19bUL
- #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
- #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
- #define HWRM_FUNC_PTP_CFG 0x19eUL
- #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
- #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
- #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
- #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
- #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
- #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
- #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
- #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
- #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
- #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
- #define HWRM_FUNC_SYNCE_CFG 0x1abUL
- #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
- #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
- #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
- #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
- #define HWRM_FUNC_LAG_CREATE 0x1b0UL
- #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
- #define HWRM_FUNC_LAG_FREE 0x1b2UL
- #define HWRM_FUNC_LAG_QCFG 0x1b3UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
- #define HWRM_SELFTEST_QLIST 0x200UL
- #define HWRM_SELFTEST_EXEC 0x201UL
- #define HWRM_SELFTEST_IRQ 0x202UL
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
- #define HWRM_PCIE_QSTATS 0x204UL
- #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
- #define HWRM_MFG_TIMERS_QUERY 0x206UL
- #define HWRM_MFG_OTP_CFG 0x207UL
- #define HWRM_MFG_OTP_QCFG 0x208UL
- #define HWRM_MFG_HDMA_TEST 0x209UL
- #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
- #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
- #define HWRM_MFG_SOC_IMAGE 0x20cUL
- #define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
- #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
- #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
- #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
- #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
- #define HWRM_MFG_PSOC_QSTATUS 0x215UL
- #define HWRM_MFG_SELFTEST_QLIST 0x216UL
- #define HWRM_MFG_SELFTEST_EXEC 0x217UL
- #define HWRM_STAT_GENERIC_QSTATS 0x218UL
- #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
- #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
- #define HWRM_MFG_TESTS 0x21bUL
- #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
- #define HWRM_PORT_POE_CFG 0x230UL
- #define HWRM_PORT_POE_QCFG 0x231UL
- #define HWRM_UDCC_QCAPS 0x258UL
- #define HWRM_UDCC_CFG 0x259UL
- #define HWRM_UDCC_QCFG 0x25aUL
- #define HWRM_UDCC_SESSION_CFG 0x25bUL
- #define HWRM_UDCC_SESSION_QCFG 0x25cUL
- #define HWRM_UDCC_SESSION_QUERY 0x25dUL
- #define HWRM_UDCC_COMP_CFG 0x25eUL
- #define HWRM_UDCC_COMP_QCFG 0x25fUL
- #define HWRM_UDCC_COMP_QUERY 0x260UL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
- #define HWRM_TF 0x2bcUL
- #define HWRM_TF_VERSION_GET 0x2bdUL
- #define HWRM_TF_SESSION_OPEN 0x2c6UL
- #define HWRM_TF_SESSION_REGISTER 0x2c8UL
- #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
- #define HWRM_TF_SESSION_CLOSE 0x2caUL
- #define HWRM_TF_SESSION_QCFG 0x2cbUL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
- #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
- #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
- #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
- #define HWRM_TF_TBL_TYPE_GET 0x2daUL
- #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
- #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
- #define HWRM_TF_EM_INSERT 0x2eaUL
- #define HWRM_TF_EM_DELETE 0x2ebUL
- #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
- #define HWRM_TF_EM_MOVE 0x2edUL
- #define HWRM_TF_TCAM_SET 0x2f8UL
- #define HWRM_TF_TCAM_GET 0x2f9UL
- #define HWRM_TF_TCAM_MOVE 0x2faUL
- #define HWRM_TF_TCAM_FREE 0x2fbUL
- #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
- #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
- #define HWRM_TF_IF_TBL_SET 0x2feUL
- #define HWRM_TF_IF_TBL_GET 0x2ffUL
- #define HWRM_TF_RESC_USAGE_SET 0x300UL
- #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
- #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
- #define HWRM_TF_TBL_TYPE_FREE 0x303UL
- #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
- #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
- #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
- #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
- #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
- #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
- #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
- #define HWRM_TFC_SESSION_FID_ADD 0x389UL
- #define HWRM_TFC_SESSION_FID_REM 0x38aUL
- #define HWRM_TFC_IDENT_ALLOC 0x38bUL
- #define HWRM_TFC_IDENT_FREE 0x38cUL
- #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
- #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
- #define HWRM_TFC_IDX_TBL_SET 0x38fUL
- #define HWRM_TFC_IDX_TBL_GET 0x390UL
- #define HWRM_TFC_IDX_TBL_FREE 0x391UL
- #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
- #define HWRM_TFC_TCAM_SET 0x393UL
- #define HWRM_TFC_TCAM_GET 0x394UL
- #define HWRM_TFC_TCAM_ALLOC 0x395UL
- #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
- #define HWRM_TFC_TCAM_FREE 0x397UL
- #define HWRM_TFC_IF_TBL_SET 0x398UL
- #define HWRM_TFC_IF_TBL_GET 0x399UL
- #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
- #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
- #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
- #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
- #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
- #define HWRM_SV 0x400UL
- #define HWRM_DBG_SERDES_TEST 0xff0eUL
- #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
- #define HWRM_DBG_READ_DIRECT 0xff10UL
- #define HWRM_DBG_READ_INDIRECT 0xff11UL
- #define HWRM_DBG_WRITE_DIRECT 0xff12UL
- #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
- #define HWRM_DBG_DUMP 0xff14UL
- #define HWRM_DBG_ERASE_NVM 0xff15UL
- #define HWRM_DBG_CFG 0xff16UL
- #define HWRM_DBG_COREDUMP_LIST 0xff17UL
- #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
- #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
- #define HWRM_DBG_FW_CLI 0xff1aUL
- #define HWRM_DBG_I2C_CMD 0xff1bUL
- #define HWRM_DBG_RING_INFO_GET 0xff1cUL
- #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
- #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
- #define HWRM_DBG_DRV_TRACE 0xff1fUL
- #define HWRM_DBG_QCAPS 0xff20UL
- #define HWRM_DBG_QCFG 0xff21UL
- #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
- #define HWRM_DBG_USEQ_ALLOC 0xff23UL
- #define HWRM_DBG_USEQ_FREE 0xff24UL
- #define HWRM_DBG_USEQ_FLUSH 0xff25UL
- #define HWRM_DBG_USEQ_QCAPS 0xff26UL
- #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
- #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
- #define HWRM_DBG_USEQ_RUN 0xff29UL
- #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
- #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
- #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
- #define HWRM_DBG_PTRACE 0xff2dUL
- #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
- #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
- #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
- #define HWRM_NVM_DEFRAG 0xffecUL
- #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
- #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
- #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
- #define HWRM_NVM_FLUSH 0xfff0UL
- #define HWRM_NVM_GET_VARIABLE 0xfff1UL
- #define HWRM_NVM_SET_VARIABLE 0xfff2UL
- #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
- #define HWRM_NVM_MODIFY 0xfff4UL
- #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
- #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
- #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
- #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
- #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
- #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
- #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
- #define HWRM_NVM_RAW_DUMP 0xfffcUL
- #define HWRM_NVM_READ 0xfffdUL
- #define HWRM_NVM_WRITE 0xfffeUL
- #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
- #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
- __le16 unused_0[3];
-};
-
-/* ret_codes (size:64b/8B) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
- #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
- #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
- #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
- #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_BUSY 0x10UL
- #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
- #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
- #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
- #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
- #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
- __le16 unused_0[3];
-};
-
-/* hwrm_err_output (size:128b/16B) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN 128
-#define HWRM_MAX_RESP_LEN 704
-#define HW_HASH_INDEX_SIZE 0x80
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1
-#define HWRM_TARGET_ID_BONO 0xFFF8
-#define HWRM_TARGET_ID_KONG 0xFFF9
-#define HWRM_TARGET_ID_APE 0xFFFA
-#define HWRM_TARGET_ID_TOOLS 0xFFFD
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 97
-#define HWRM_VERSION_STR "1.10.3.97"
-
-/* hwrm_ver_get_input (size:192b/24B) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* hwrm_ver_get_output (size:1408b/176B) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj_8b;
- u8 hwrm_intf_min_8b;
- u8 hwrm_intf_upd_8b;
- u8 hwrm_intf_rsvd_8b;
- u8 hwrm_fw_maj_8b;
- u8 hwrm_fw_min_8b;
- u8 hwrm_fw_bld_8b;
- u8 hwrm_fw_rsvd_8b;
- u8 mgmt_fw_maj_8b;
- u8 mgmt_fw_min_8b;
- u8 mgmt_fw_bld_8b;
- u8 mgmt_fw_rsvd_8b;
- u8 netctrl_fw_maj_8b;
- u8 netctrl_fw_min_8b;
- u8 netctrl_fw_bld_8b;
- u8 netctrl_fw_rsvd_8b;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
- #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
- #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
- #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
- #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
- u8 roce_fw_maj_8b;
- u8 roce_fw_min_8b;
- u8 roce_fw_bld_8b;
- u8 roce_fw_rsvd_8b;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- char active_pkg_name[16];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
- u8 unused_0[2];
- u8 always_1;
- __le16 hwrm_intf_major;
- __le16 hwrm_intf_minor;
- __le16 hwrm_intf_build;
- __le16 hwrm_intf_patch;
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 max_ext_req_len;
- __le16 max_req_timeout;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* eject_cmpl (size:128b/16B) */
-struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
- #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
- #define EJECT_CMPL_FLAGS_SFT 6
- #define EJECT_CMPL_FLAGS_ERROR 0x40UL
- __le16 len;
- __le32 opaque;
- __le16 v;
- #define EJECT_CMPL_V 0x1UL
- #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
- #define EJECT_CMPL_ERRORS_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
- __le16 reserved16;
- __le32 unused_2;
-};
-
-/* hwrm_cmpl (size:128b/16B) */
-struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* hwrm_fwd_req_cmpl (size:128b/16B) */
-struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* hwrm_fwd_resp_cmpl (size:128b/16B) */
-struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* hwrm_async_event_cmpl (size:128b/16B) */
-struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
-};
-
-/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
-struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
-struct hwrm_async_event_cmpl_reset_notify {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
-};
-
-/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_recovery {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
-};
-
-/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
-struct hwrm_async_event_cmpl_ring_monitor_msg {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
-};
-
-/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_default_vnic_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
-};
-
-/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
-struct hwrm_async_event_cmpl_hw_flow_aged {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_req {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_done {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
-};
-
-/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
-struct hwrm_async_event_cmpl_deferred_response {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
-struct hwrm_async_event_cmpl_echo_request {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_update {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
-};
-
-/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
-struct hwrm_async_event_cmpl_pps_timestamp {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
-};
-
-/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
-};
-
-/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
-struct hwrm_async_event_cmpl_dbg_buf_producer {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE
-};
-
-/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_base {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
-};
-
-/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_pause_storm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
-};
-
-/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_invalid_signal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
-};
-
-/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_nvm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
-};
-
-/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
-};
-
-/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_thermal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
-};
-
-/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
-};
-
-/* hwrm_func_reset_input (size:192b/24B) */
-struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
- u8 unused_0;
-};
-
-/* hwrm_func_reset_output (size:128b/16B) */
-struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_getfid_input (size:192b/24B) */
-struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- u8 unused_0[2];
-};
-
-/* hwrm_func_getfid_output (size:128b/16B) */
-struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc_input (size:192b/24B) */
-struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_alloc_output (size:128b/16B) */
-struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_free_input (size:192b/24B) */
-struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_free_output (size:128b/16B) */
-struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg_input (size:576b/72B) */
-struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
- __le32 flags;
- #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
- #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
- #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
- #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
- #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
- #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
- #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
- #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- __le16 num_msix;
- u8 unused[2];
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_cfg_output (size:128b/16B) */
-struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_qcaps_input (size:192b/24B) */
-struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcaps_output (size:1152b/144B) */
-struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- __le16 max_msix_vfs;
- __le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
- u8 max_schqs;
- u8 mpc_chnls_cap;
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
- __le16 max_key_ctxs_alloc;
- __le32 flags_ext2;
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
- __le16 tunnel_disable_flag;
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
- __le16 xid_partition_cap;
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
- u8 device_serial_number[8];
- __le16 ctxs_per_partition;
- __le16 max_tso_segs;
- __le32 roce_vf_max_av;
- __le32 roce_vf_max_cq;
- __le32 roce_vf_max_mrw;
- __le32 roce_vf_max_qp;
- __le32 roce_vf_max_srq;
- __le32 roce_vf_max_gid;
- __le32 flags_ext3;
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
- __le16 max_roce_vfs;
- __le16 max_crypto_rx_flow_filters;
- u8 unused_3[3];
- u8 valid;
-};
-
-/* hwrm_func_qcfg_input (size:192b/24B) */
-struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcfg_output (size:1344b/168B) */
-struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
- #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
- #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
- #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
- #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
- #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
- #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
- #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
- #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 admin_mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
- u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- __le16 alloc_stat_ctx;
- __le16 alloc_msix;
- __le16 registered_vfs;
- __le16 l2_doorbell_bar_size_kb;
- u8 active_endpoints;
- u8 always_1;
- __le32 reset_addr_poll;
- __le16 legacy_l2_db_size_kb;
- __le16 svif_info;
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
- u8 mpc_chnls;
- #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 db_page_size;
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
- __le16 roce_vnic_id;
- __le32 partition_min_bw;
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __le16 host_mtu;
- __le16 flags2;
- #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
- __le16 stag_vid;
- u8 port_kdnet_mode;
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
- u8 kdnet_pcie_function;
- __le16 port_kdnet_fid;
- u8 unused_5;
- u8 roce_bidi_opt_mode;
- #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
- #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
- #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- u8 lag_id;
- u8 parif;
- u8 fw_lag_id;
- u8 unused_6;
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
- __le32 roce_max_av_per_vf;
- __le32 roce_max_cq_per_vf;
- __le32 roce_max_mrw_per_vf;
- __le32 roce_max_qp_per_vf;
- __le32 roce_max_srq_per_vf;
- __le32 roce_max_gid_per_vf;
- __le16 xid_partition_cfg;
- #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
- #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
- __le16 mirror_vnic_id;
- u8 unused_7[7];
- u8 valid;
-};
-
-/* hwrm_func_cfg_input (size:1280b/160B) */
-struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 num_msix;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
- #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
- #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
- #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
- #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
- #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
- #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
- #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
- #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
- #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
- #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
- #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
- #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
- #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
- #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
- #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
- __le16 admin_mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
- u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
- __le16 num_mcast_filters;
- __le16 schq_id;
- __le16 mpc_chnls;
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
- __le32 partition_min_bw;
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __be16 tpid;
- __le16 host_mtu;
- __le32 flags2;
- #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
- #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
- __le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
- #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
- #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
- u8 port_kdnet_mode;
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
- u8 db_page_size;
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- __le16 physical_slot_number;
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
- __le32 roce_max_av_per_vf;
- __le32 roce_max_cq_per_vf;
- __le32 roce_max_mrw_per_vf;
- __le32 roce_max_qp_per_vf;
- __le32 roce_max_srq_per_vf;
- __le32 roce_max_gid_per_vf;
- __le16 xid_partition_cfg;
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
- __le16 unused_2;
-};
-
-/* hwrm_func_cfg_output (size:128b/16B) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_cfg_cmd_err (size:64b/8B) */
-struct hwrm_func_cfg_cmd_err {
- u8 code;
- #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
- #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
- u8 unused_0[7];
-};
-
-/* hwrm_func_qstats_input (size:192b/24B) */
-struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
- u8 unused_0[5];
-};
-
-/* hwrm_func_qstats_output (size:1408b/176B) */
-struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 clear_seq;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_func_qstats_ext_input (size:256b/32B) */
-struct hwrm_func_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
- u8 unused_0[1];
- __le32 enables;
- #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
- __le16 schq_id;
- __le16 traffic_class;
- u8 unused_1[4];
-};
-
-/* hwrm_func_qstats_ext_output (size:1536b/192B) */
-struct hwrm_func_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_clr_stats_input (size:192b/24B) */
-struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_clr_stats_output (size:128b/16B) */
-struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free_input (size:192b/24B) */
-struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[6];
-};
-
-/* hwrm_func_vf_resc_free_output (size:128b/16B) */
-struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr_input (size:896b/112B) */
-struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le32 timestamp;
- u8 unused_1[4];
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
-};
-
-/* hwrm_func_drv_rgtr_output (size:128b/16B) */
-struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
-struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
-struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
-struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0[2];
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* hwrm_func_buf_rgtr_output (size:128b/16B) */
-struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_qver_input (size:192b/24B) */
-struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- u8 driver_type;
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
- u8 unused_0;
-};
-
-/* hwrm_func_drv_qver_output (size:256b/32B) */
-struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_func_resource_qcaps_input (size:192b/24B) */
-struct hwrm_func_resource_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_resource_qcaps_output (size:704b/88B) */
-struct hwrm_func_resource_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 max_vfs;
- __le16 max_msix;
- __le16 vf_reservation_strategy;
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 max_tx_scheduler_inputs;
- __le16 flags;
- #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_msix;
- __le32 min_ktls_tx_key_ctxs;
- __le32 max_ktls_tx_key_ctxs;
- __le32 min_ktls_rx_key_ctxs;
- __le32 max_ktls_rx_key_ctxs;
- __le32 min_quic_tx_key_ctxs;
- __le32 max_quic_tx_key_ctxs;
- __le32 min_quic_rx_key_ctxs;
- __le32 max_quic_rx_key_ctxs;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
-struct hwrm_func_vf_resource_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 max_msix;
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 flags;
- #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_msix;
- __le32 min_ktls_tx_key_ctxs;
- __le32 max_ktls_tx_key_ctxs;
- __le32 min_ktls_rx_key_ctxs;
- __le32 max_ktls_rx_key_ctxs;
- __le32 min_quic_tx_key_ctxs;
- __le32 max_quic_tx_key_ctxs;
- __le32 min_quic_rx_key_ctxs;
- __le32 max_quic_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
-struct hwrm_func_vf_resource_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reserved_rsscos_ctx;
- __le16 reserved_cmpl_rings;
- __le16 reserved_tx_rings;
- __le16 reserved_rx_rings;
- __le16 reserved_l2_ctxs;
- __le16 reserved_vnics;
- __le16 reserved_stat_ctx;
- __le16 reserved_hw_ring_grps;
- __le32 reserved_ktls_tx_key_ctxs;
- __le32 reserved_ktls_rx_key_ctxs;
- __le32 reserved_quic_tx_key_ctxs;
- __le32 reserved_quic_rx_key_ctxs;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
-struct hwrm_func_backing_store_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
-struct hwrm_func_backing_store_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 qp_max_entries;
- __le16 qp_min_qp1_entries;
- __le16 qp_max_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_max_l2_entries;
- __le32 srq_max_entries;
- __le16 srq_entry_size;
- __le16 cq_max_l2_entries;
- __le32 cq_max_entries;
- __le16 cq_entry_size;
- __le16 vnic_max_vnic_entries;
- __le16 vnic_max_ring_table_entries;
- __le16 vnic_entry_size;
- __le32 stat_max_entries;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le32 tqm_min_entries_per_ring;
- __le32 tqm_max_entries_per_ring;
- __le32 mrav_max_entries;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- __le32 tim_max_entries;
- __le16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
- __le16 ctx_init_mask;
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
- u8 qp_init_offset;
- u8 srq_init_offset;
- u8 cq_init_offset;
- u8 vnic_init_offset;
- u8 tqm_fp_rings_count;
- u8 stat_init_offset;
- u8 mrav_init_offset;
- u8 tqm_fp_rings_count_ext;
- u8 tkc_init_offset;
- u8 rkc_init_offset;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- __le32 tkc_max_entries;
- __le32 rkc_max_entries;
- __le16 fast_qpmd_qp_num_entries;
- u8 rsvd1[5];
- u8 valid;
-};
-
-/* tqm_fp_ring_cfg (size:128b/16B) */
-struct tqm_fp_ring_cfg {
- u8 tqm_ring_pg_size_tqm_ring_lvl;
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
- u8 unused[3];
- __le32 tqm_ring_num_entries;
- __le64 tqm_ring_page_dir;
-};
-
-/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
-struct hwrm_func_backing_store_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
- __le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
- u8 qpc_pg_size_qpc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
- u8 srq_pg_size_srq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
- u8 cq_pg_size_cq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
- u8 vnic_pg_size_vnic_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
- u8 stat_pg_size_stat_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
- u8 tqm_sp_pg_size_tqm_sp_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
- u8 tqm_ring0_pg_size_tqm_ring0_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
- u8 tqm_ring1_pg_size_tqm_ring1_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
- u8 tqm_ring2_pg_size_tqm_ring2_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
- u8 tqm_ring3_pg_size_tqm_ring3_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
- u8 tqm_ring4_pg_size_tqm_ring4_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
- u8 tqm_ring5_pg_size_tqm_ring5_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
- u8 tqm_ring6_pg_size_tqm_ring6_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
- u8 tqm_ring7_pg_size_tqm_ring7_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
- u8 mrav_pg_size_mrav_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
- u8 tim_pg_size_tim_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
- __le64 qpc_page_dir;
- __le64 srq_page_dir;
- __le64 cq_page_dir;
- __le64 vnic_page_dir;
- __le64 stat_page_dir;
- __le64 tqm_sp_page_dir;
- __le64 tqm_ring0_page_dir;
- __le64 tqm_ring1_page_dir;
- __le64 tqm_ring2_page_dir;
- __le64 tqm_ring3_page_dir;
- __le64 tqm_ring4_page_dir;
- __le64 tqm_ring5_page_dir;
- __le64 tqm_ring6_page_dir;
- __le64 tqm_ring7_page_dir;
- __le64 mrav_page_dir;
- __le64 tim_page_dir;
- __le32 qp_num_entries;
- __le32 srq_num_entries;
- __le32 cq_num_entries;
- __le32 stat_num_entries;
- __le32 tqm_sp_num_entries;
- __le32 tqm_ring0_num_entries;
- __le32 tqm_ring1_num_entries;
- __le32 tqm_ring2_num_entries;
- __le32 tqm_ring3_num_entries;
- __le32 tqm_ring4_num_entries;
- __le32 tqm_ring5_num_entries;
- __le32 tqm_ring6_num_entries;
- __le32 tqm_ring7_num_entries;
- __le32 mrav_num_entries;
- __le32 tim_num_entries;
- __le16 qp_num_qp1_entries;
- __le16 qp_num_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_num_l2_entries;
- __le16 srq_entry_size;
- __le16 cq_num_l2_entries;
- __le16 cq_entry_size;
- __le16 vnic_num_vnic_entries;
- __le16 vnic_num_ring_table_entries;
- __le16 vnic_entry_size;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- u8 tqm_ring8_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
- u8 ring8_unused[3];
- __le32 tqm_ring8_num_entries;
- __le64 tqm_ring8_page_dir;
- u8 tqm_ring9_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
- u8 ring9_unused[3];
- __le32 tqm_ring9_num_entries;
- __le64 tqm_ring9_page_dir;
- u8 tqm_ring10_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
- u8 ring10_unused[3];
- __le32 tqm_ring10_num_entries;
- __le64 tqm_ring10_page_dir;
- __le32 tkc_num_entries;
- __le32 rkc_num_entries;
- __le64 tkc_page_dir;
- __le64 rkc_page_dir;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- u8 tkc_pg_size_tkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
- u8 rkc_pg_size_rkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
- __le16 qp_num_fast_qpmd_entries;
-};
-
-/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
-struct hwrm_error_recovery_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
-struct hwrm_error_recovery_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
- __le32 driver_polling_freq;
- __le32 master_func_wait_period;
- __le32 normal_func_wait_period;
- __le32 master_func_wait_period_after_reset;
- __le32 max_bailout_time_after_reset;
- __le32 fw_health_status_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
- __le32 fw_heartbeat_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
- __le32 fw_reset_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg;
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg_mask;
- u8 unused_0[3];
- u8 reg_array_cnt;
- __le32 reset_reg[16];
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
- __le32 reset_reg_val[16];
- u8 delay_after_reset[16];
- __le32 err_recovery_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_func_echo_response_input (size:192b/24B) */
-struct hwrm_func_echo_response_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 event_data1;
- __le32 event_data2;
-};
-
-/* hwrm_func_echo_response_output (size:128b/16B) */
-struct hwrm_func_echo_response_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_pin_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_pins;
- u8 state;
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
- u8 pin0_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
- u8 pin1_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
- u8 pin2_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 pin3_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_pin_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
- u8 pin0_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
- u8 pin0_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
- u8 pin1_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
- u8 pin1_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
- u8 pin2_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
- u8 pin2_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 pin3_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
- u8 pin3_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_cfg_input (size:384b/48B) */
-struct hwrm_func_ptp_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
- u8 ptp_pps_event;
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
- u8 ptp_freq_adj_dll_source;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
- u8 ptp_freq_adj_dll_phase;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
- u8 unused_0[3];
- __le32 ptp_freq_adj_ext_period;
- __le32 ptp_freq_adj_ext_up;
- __le32 ptp_freq_adj_ext_phase_lower;
- __le32 ptp_freq_adj_ext_phase_upper;
- __le64 ptp_set_time;
-};
-
-/* hwrm_func_ptp_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
-struct hwrm_func_ptp_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
-struct hwrm_func_ptp_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 pps_event_ts;
- __le64 ptm_local_ts;
- __le64 ptm_system_ts;
- __le32 ptm_link_delay;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_ext_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
- __le16 phc_master_fid;
- __le16 phc_sec_fid;
- u8 phc_sec_mode;
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
- u8 unused_0;
- __le32 failover_timer;
- u8 unused_1[4];
-};
-
-/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_ext_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_ext_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
-struct hwrm_func_ptp_ext_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 phc_master_fid;
- __le16 phc_sec_fid;
- __le16 phc_active_fid0;
- __le16 phc_active_fid1;
- __le32 last_failover_event;
- __le16 from_fid;
- __le16 to_fid;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
-struct hwrm_func_backing_store_cfg_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
- __le16 instance;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
- __le64 page_dir;
- __le32 num_entries;
- __le16 entry_size;
- u8 page_size_pbl_level;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
- u8 subtype_valid_cnt;
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- __le32 enables;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
- __le32 next_bs_offset;
-};
-
-/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 rsvd0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
-struct hwrm_func_backing_store_qcfg_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
- __le16 instance;
- u8 rsvd[4];
-};
-
-/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
-struct hwrm_func_backing_store_qcfg_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
- __le16 instance;
- __le32 flags;
- __le64 page_dir;
- __le32 num_entries;
- u8 page_size_pbl_level;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
- u8 subtype_valid_cnt;
- u8 rsvd[2];
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- u8 rsvd2[7];
- u8 valid;
-};
-
-/* qpc_split_entries (size:128b/16B) */
-struct qpc_split_entries {
- __le32 qp_num_l2_entries;
- __le32 qp_num_qp1_entries;
- __le32 qp_num_fast_qpmd_entries;
- __le32 rsvd;
-};
-
-/* srq_split_entries (size:128b/16B) */
-struct srq_split_entries {
- __le32 srq_num_l2_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* cq_split_entries (size:128b/16B) */
-struct cq_split_entries {
- __le32 cq_num_l2_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* vnic_split_entries (size:128b/16B) */
-struct vnic_split_entries {
- __le32 vnic_num_vnic_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* mrav_split_entries (size:128b/16B) */
-struct mrav_split_entries {
- __le32 mrav_num_av_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* ts_split_entries (size:128b/16B) */
-struct ts_split_entries {
- __le32 region_num_entries;
- u8 tsid;
- u8 lkup_static_bkt_cnt_exp[2];
- u8 locked;
- __le32 rsvd2[2];
-};
-
-/* ck_split_entries (size:128b/16B) */
-struct ck_split_entries {
- __le32 num_quic_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
-struct hwrm_func_backing_store_qcaps_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
- u8 rsvd[6];
-};
-
-/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
-struct hwrm_func_backing_store_qcaps_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
- __le16 entry_size;
- __le32 flags;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
- __le32 instance_bit_map;
- u8 ctx_init_value;
- u8 ctx_init_offset;
- u8 entry_multiple;
- u8 rsvd;
- __le32 max_num_entries;
- __le32 min_num_entries;
- __le16 next_valid_type;
- u8 subtype_valid_cnt;
- u8 exact_cnt_bit_map;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- __le16 max_instance_count;
- u8 rsvd3;
- u8 valid;
-};
-
-/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
-struct hwrm_func_dbr_pacing_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
-struct hwrm_func_dbr_pacing_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
- u8 unused_0[7];
- __le32 dbr_stat_db_fifo_reg;
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
- __le32 dbr_stat_db_fifo_reg_watermark_mask;
- u8 dbr_stat_db_fifo_reg_watermark_shift;
- u8 unused_1[3];
- __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
- u8 dbr_stat_db_fifo_reg_fifo_room_shift;
- u8 unused_2[3];
- __le32 dbr_throttling_aeq_arm_reg;
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
- u8 dbr_throttling_aeq_arm_reg_val;
- u8 unused_3[3];
- __le32 dbr_stat_db_max_fifo_depth;
- __le32 primary_nq_id;
- __le32 pacing_threshold;
- u8 unused_4[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_if_change_input (size:192b/24B) */
-struct hwrm_func_drv_if_change_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
- __le32 unused;
-};
-
-/* hwrm_func_drv_if_change_output (size:128b/16B) */
-struct hwrm_func_drv_if_change_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_input (size:512b/64B) */
-struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 mgmt_flag;
- #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
- #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
- __le32 tx_lpi_timer;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le16 auto_link_pam4_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- __le16 force_link_speeds2;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
- __le16 auto_link_speeds2_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
- u8 unused_2[6];
-};
-
-/* hwrm_port_phy_cfg_output (size:128b/16B) */
-struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
-struct hwrm_port_phy_cfg_cmd_err {
- u8 code;
- #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
- u8 unused_0[7];
-};
-
-/* hwrm_port_phy_qcfg_input (size:192b/24B) */
-struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
-struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 active_fec_signal_mode;
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
- u8 option_flags;
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le16 support_pam4_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
- __le16 auto_pam4_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
- u8 link_partner_pam4_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
- u8 link_down_reason;
- #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
- __le16 support_speeds2;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
- __le16 force_link_speeds2;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
- __le16 auto_link_speeds2;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
- u8 active_lanes;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg_input (size:448b/56B) */
-struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
- #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
- #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
- __le32 ptp_freq_adj_ppb;
- u8 unused_1[3];
- u8 ptp_load_control;
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
- __le64 ptp_adj_phase;
-};
-
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
-struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
- u8 unused_0[3];
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 ts_ref_clock_reg_lower;
- __le32 ts_ref_clock_reg_upper;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518b_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047b_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518b_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* hwrm_port_qstats_input (size:320b/40B) */
-struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_output (size:128b/16B) */
-struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 flags;
- #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
- u8 unused_0[2];
- u8 valid;
-};
-
-/* tx_port_stats_ext (size:2048b/256B) */
-struct tx_port_stats_ext {
- __le64 tx_bytes_cos0;
- __le64 tx_bytes_cos1;
- __le64 tx_bytes_cos2;
- __le64 tx_bytes_cos3;
- __le64 tx_bytes_cos4;
- __le64 tx_bytes_cos5;
- __le64 tx_bytes_cos6;
- __le64 tx_bytes_cos7;
- __le64 tx_packets_cos0;
- __le64 tx_packets_cos1;
- __le64 tx_packets_cos2;
- __le64 tx_packets_cos3;
- __le64 tx_packets_cos4;
- __le64 tx_packets_cos5;
- __le64 tx_packets_cos6;
- __le64 tx_packets_cos7;
- __le64 pfc_pri0_tx_duration_us;
- __le64 pfc_pri0_tx_transitions;
- __le64 pfc_pri1_tx_duration_us;
- __le64 pfc_pri1_tx_transitions;
- __le64 pfc_pri2_tx_duration_us;
- __le64 pfc_pri2_tx_transitions;
- __le64 pfc_pri3_tx_duration_us;
- __le64 pfc_pri3_tx_transitions;
- __le64 pfc_pri4_tx_duration_us;
- __le64 pfc_pri4_tx_transitions;
- __le64 pfc_pri5_tx_duration_us;
- __le64 pfc_pri5_tx_transitions;
- __le64 pfc_pri6_tx_duration_us;
- __le64 pfc_pri6_tx_transitions;
- __le64 pfc_pri7_tx_duration_us;
- __le64 pfc_pri7_tx_transitions;
-};
-
-/* rx_port_stats_ext (size:3904b/488B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
- __le64 rx_bytes_cos0;
- __le64 rx_bytes_cos1;
- __le64 rx_bytes_cos2;
- __le64 rx_bytes_cos3;
- __le64 rx_bytes_cos4;
- __le64 rx_bytes_cos5;
- __le64 rx_bytes_cos6;
- __le64 rx_bytes_cos7;
- __le64 rx_packets_cos0;
- __le64 rx_packets_cos1;
- __le64 rx_packets_cos2;
- __le64 rx_packets_cos3;
- __le64 rx_packets_cos4;
- __le64 rx_packets_cos5;
- __le64 rx_packets_cos6;
- __le64 rx_packets_cos7;
- __le64 pfc_pri0_rx_duration_us;
- __le64 pfc_pri0_rx_transitions;
- __le64 pfc_pri1_rx_duration_us;
- __le64 pfc_pri1_rx_transitions;
- __le64 pfc_pri2_rx_duration_us;
- __le64 pfc_pri2_rx_transitions;
- __le64 pfc_pri3_rx_duration_us;
- __le64 pfc_pri3_rx_transitions;
- __le64 pfc_pri4_rx_duration_us;
- __le64 pfc_pri4_rx_transitions;
- __le64 pfc_pri5_rx_duration_us;
- __le64 pfc_pri5_rx_transitions;
- __le64 pfc_pri6_rx_duration_us;
- __le64 pfc_pri6_rx_transitions;
- __le64 pfc_pri7_rx_duration_us;
- __le64 pfc_pri7_rx_transitions;
- __le64 rx_bits;
- __le64 rx_buffer_passed_threshold;
- __le64 rx_pcs_symbol_err;
- __le64 rx_corrected_bits;
- __le64 rx_discard_bytes_cos0;
- __le64 rx_discard_bytes_cos1;
- __le64 rx_discard_bytes_cos2;
- __le64 rx_discard_bytes_cos3;
- __le64 rx_discard_bytes_cos4;
- __le64 rx_discard_bytes_cos5;
- __le64 rx_discard_bytes_cos6;
- __le64 rx_discard_bytes_cos7;
- __le64 rx_discard_packets_cos0;
- __le64 rx_discard_packets_cos1;
- __le64 rx_discard_packets_cos2;
- __le64 rx_discard_packets_cos3;
- __le64 rx_discard_packets_cos4;
- __le64 rx_discard_packets_cos5;
- __le64 rx_discard_packets_cos6;
- __le64 rx_discard_packets_cos7;
- __le64 rx_fec_corrected_blocks;
- __le64 rx_fec_uncorrectable_blocks;
- __le64 rx_filter_miss;
- __le64 rx_fec_symbol_err;
-};
-
-/* hwrm_port_qstats_ext_input (size:320b/40B) */
-struct hwrm_port_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 flags;
- #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_ext_output (size:128b/16B) */
-struct hwrm_port_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- __le16 total_active_cos_queues;
- u8 flags;
- #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
- #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
-struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 lpbk_stat_size;
- u8 flags;
- #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 lpbk_stat_host_addr;
-};
-
-/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
-struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 lpbk_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* port_lpbk_stats (size:640b/80B) */
-struct port_lpbk_stats {
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 lpbk_tx_discards;
- __le64 lpbk_tx_errors;
- __le64 lpbk_rx_discards;
- __le64 lpbk_rx_errors;
-};
-
-/* hwrm_port_ecn_qstats_input (size:256b/32B) */
-struct hwrm_port_ecn_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 ecn_stat_buf_size;
- u8 flags;
- #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
- __le64 ecn_stat_host_addr;
-};
-
-/* hwrm_port_ecn_qstats_output (size:128b/16B) */
-struct hwrm_port_ecn_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ecn_stat_buf_size;
- u8 mark_en;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* port_stats_ecn (size:512b/64B) */
-struct port_stats_ecn {
- __le64 mark_cnt_cos0;
- __le64 mark_cnt_cos1;
- __le64 mark_cnt_cos2;
- __le64 mark_cnt_cos3;
- __le64 mark_cnt_cos4;
- __le64 mark_cnt_cos5;
- __le64 mark_cnt_cos6;
- __le64 mark_cnt_cos7;
-};
-
-/* hwrm_port_clr_stats_input (size:192b/24B) */
-struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
- u8 unused_0[5];
-};
-
-/* hwrm_port_clr_stats_output (size:128b/16B) */
-struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
-struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_ts_query_input (size:320b/40B) */
-struct hwrm_port_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
- #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
- __le16 port_id;
- u8 unused_0[2];
- __le16 enables;
- #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
- __le16 ts_req_timeout;
- __le32 ptp_seq_id;
- __le16 ptp_hdr_offset;
- u8 unused_1[6];
-};
-
-/* hwrm_port_ts_query_output (size:192b/24B) */
-struct hwrm_port_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ptp_msg_ts;
- __le16 ptp_msg_seqid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps_input (size:192b/24B) */
-struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcaps_output (size:320b/40B) */
-struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
- __le16 supported_pam4_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
- __le16 supported_pam4_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
- __le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
- u8 internal_port_cnt;
- u8 unused_0;
- __le16 supported_speeds2_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
- __le16 supported_speeds2_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
-struct hwrm_port_phy_i2c_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
- #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 bank_number;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
- __le32 data[16];
-};
-
-/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
-struct hwrm_port_phy_i2c_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
-struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 bank_number;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
-struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
-struct hwrm_port_phy_mdio_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- __le16 reg_data;
- u8 cl45_mdio;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
-struct hwrm_port_phy_mdio_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- u8 cl45_mdio;
- u8 unused_1;
-};
-
-/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reg_data;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_led_cfg_input (size:512b/64B) */
-struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* hwrm_port_led_cfg_output (size:128b/16B) */
-struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_led_qcfg_input (size:192b/24B) */
-struct hwrm_port_led_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcfg_output (size:448b/56B) */
-struct hwrm_port_led_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
- u8 led0_state;
- #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
- u8 led1_state;
- #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
- u8 led2_state;
- #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
- u8 led3_state;
- #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 unused_4[6];
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps_input (size:192b/24B) */
-struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcaps_output (size:384b/48B) */
-struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
- u8 led0_group_id;
- u8 unused_0;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
- u8 led1_group_id;
- u8 unused_1;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
- u8 led2_group_id;
- u8 unused_2;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
- u8 led3_group_id;
- u8 unused_3;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_4[3];
- u8 valid;
-};
-
-/* hwrm_port_mac_qcaps_input (size:192b/24B) */
-struct hwrm_port_mac_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_qcaps_output (size:128b/16B) */
-struct hwrm_port_mac_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
- #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg_input (size:192b/24B) */
-struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- u8 drv_qmap_cap;
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
- u8 unused_0;
-};
-
-/* hwrm_queue_qportcfg_output (size:1344b/168B) */
-struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 queue_id0_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
- char qid0_name[16];
- char qid1_name[16];
- char qid2_name[16];
- char qid3_name[16];
- char qid4_name[16];
- char qid5_name[16];
- char qid6_name[16];
- char qid7_name[16];
- u8 queue_id1_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id2_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id3_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id4_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id5_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id6_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id7_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 valid;
-};
-
-/* hwrm_queue_qcfg_input (size:192b/24B) */
-struct hwrm_queue_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
- __le32 queue_id;
-};
-
-/* hwrm_queue_qcfg_output (size:128b/16B) */
-struct hwrm_queue_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 queue_len;
- u8 service_profile;
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
- u8 queue_cfg_info;
- #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_queue_cfg_input (size:320b/40B) */
-struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
- u8 unused_0[7];
-};
-
-/* hwrm_queue_cfg_output (size:128b/16B) */
-struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- __le16 port_id;
- u8 unused_0[2];
-};
-
-/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
-struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
-struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
-struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
-struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- struct {
- u8 queue_id;
- __le32 queue_id_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id_pri_lvl;
- u8 queue_id_bw_weight;
- } __packed cfg[7];
- u8 unused_2[4];
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
-struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- struct {
- u8 queue_id;
- __le32 queue_id_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id_pri_lvl;
- u8 queue_id_bw_weight;
- } __packed cfg[7];
- u8 unused_1[5];
-};
-
-/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
-struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
-struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
-struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
-struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- u8 unused_1[4];
-};
-
-/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
-struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- u8 unused_0[4];
-};
-
-/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_alloc_input (size:192b/24B) */
-struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
- #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
- __le16 virtio_net_fid;
- __le16 vnic_id;
-};
-
-/* hwrm_vnic_alloc_output (size:128b/16B) */
-struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_update_input (size:256b/32B) */
-struct hwrm_vnic_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 enables;
- #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
- #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
- #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
- u8 vnic_state;
- #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
- #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
- #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
- u8 metadata_format_type;
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
- __le16 mru;
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_update_output (size:128b/16B) */
-struct hwrm_vnic_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_free_input (size:192b/24B) */
-struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_free_output (size:128b/16B) */
-struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_cfg_input (size:384b/48B) */
-struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
- #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
- #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
- #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
- #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le16 default_rx_ring_id;
- __le16 default_cmpl_ring_id;
- __le16 queue_id;
- u8 rx_csum_v2_mode;
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 l2_cqe_mode;
- #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- __le32 raw_qp_id;
-};
-
-/* hwrm_vnic_cfg_output (size:128b/16B) */
-struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps_input (size:192b/24B) */
-struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_qcaps_output (size:192b/24B) */
-struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0[2];
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
- #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
- #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
- #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
- #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
- __le16 max_aggs_supported;
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
-struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
- u8 unused_0[2];
- __le32 max_agg_timer;
- __le32 min_agg_len;
- __le32 tnl_tpa_en_bitmap;
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
-struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_tpa_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vnic_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_tpa_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
- __le16 max_agg_segs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
- __le32 max_agg_timer;
- __le32 min_agg_len;
- __le32 tnl_tpa_en_bitmap;
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
-struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
- __le16 vnic_id;
- u8 ring_table_pair_index;
- u8 hash_mode_flags;
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- u8 flags;
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
- #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
- u8 ring_select_mode;
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
-struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
-struct hwrm_vnic_rss_cfg_cmd_err {
- u8 code;
- #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
- u8 unused_0[7];
-};
-
-/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_rss_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_ctx_idx;
- __le16 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
-struct hwrm_vnic_rss_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 hash_type;
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
- u8 unused_0[4];
- __le32 hash_key[10];
- u8 hash_mode_flags;
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- u8 ring_select_mode;
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
-struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 max_bds;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
-struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
-struct hwrm_vnic_plcmodes_cfg_cmd_err {
- u8 code;
- #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
- #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
- u8 unused_0[7];
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_alloc_input (size:704b/88B) */
-struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
- #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 cmpl_coal_cnt;
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
- __le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
- #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
- #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
- #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- __le16 schq_id;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- __le16 rx_buf_size;
- __le16 rx_ring_id;
- __le16 nq_ring_id;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- __le16 steering_tag;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 mpc_chnls_type;
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 rx_rate_profile_sel;
- #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
- #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
- #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
- u8 unused_4;
- __le64 cq_handle;
-};
-
-/* hwrm_ring_alloc_output (size:128b/16B) */
-struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 push_buffer_index;
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[2];
- u8 valid;
-};
-
-/* hwrm_ring_free_input (size:256b/32B) */
-struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 flags;
- #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
- #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
- __le16 ring_id;
- __le32 prod_idx;
- __le32 opaque;
- __le32 unused_1;
-};
-
-/* hwrm_ring_free_output (size:128b/16B) */
-struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 push_buffer_index;
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[3];
- u8 consumer_idx[3];
- u8 valid;
-};
-
-/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
-struct hwrm_ring_aggint_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
-struct hwrm_ring_aggint_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 cmpl_params;
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
- __le32 nq_params;
- #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
- __le16 num_cmpl_dma_aggr_min;
- __le16 num_cmpl_dma_aggr_max;
- __le16 num_cmpl_dma_aggr_during_int_min;
- __le16 num_cmpl_dma_aggr_during_int_max;
- __le16 cmpl_aggr_dma_tmr_min;
- __le16 cmpl_aggr_dma_tmr_max;
- __le16 cmpl_aggr_dma_tmr_during_int_min;
- __le16 cmpl_aggr_dma_tmr_during_int_max;
- __le16 int_lat_tmr_min_min;
- __le16 int_lat_tmr_min_max;
- __le16 int_lat_tmr_max_min;
- __le16 int_lat_tmr_max_max;
- __le16 num_cmpl_aggr_int_min;
- __le16 num_cmpl_aggr_int_max;
- __le16 timer_units;
- u8 unused_0[1];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 enables;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc_input (size:192b/24B) */
-struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* hwrm_ring_grp_alloc_output (size:128b/16B) */
-struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_ring_grp_free_input (size:192b/24B) */
-struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- u8 unused_0[4];
-};
-
-/* hwrm_ring_grp_free_output (size:128b/16B) */
-struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
-#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
-#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
-#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
-
-/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
-struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
- u8 l2_addr[6];
- u8 num_vlans;
- u8 t_num_vlans;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_1[2];
- u8 t_l2_addr[6];
- u8 unused_2[2];
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
- u8 unused_3;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_4;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
- u8 unused_5;
- __le32 unused_6;
- __le64 l2_filter_id_hint;
-};
-
-/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
-};
-
-/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
-struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
- __le32 prof_func;
- __le32 l2_context_id;
-};
-
-/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
-struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- u8 unused_0[4];
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- u8 unused_1[4];
-};
-
-/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
-struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
-struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
-struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 tunnel_flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
-};
-
-/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
-};
-
-/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
- u8 hdr_rsvd0[3];
- u8 hdr_rsvd1;
- u8 hdr_flags;
- u8 unused[3];
-};
-
-/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
-struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
- u8 unused_0[3];
- __le32 encap_data[20];
-};
-
-/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
-struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
- __le16 dst_id;
- __le16 rfs_ring_tbl_idx;
- u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
-};
-
-/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
-struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
- u8 unused_1[6];
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
-struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2[2];
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
- __le16 unused_3;
- __le32 unused_4;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 tunnel_type;
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
-};
-
-/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
-struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0[2];
- __le32 flow_id;
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
- __le64 ext_flow_handle;
- __le32 flow_counter_id;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_flow_alloc_cmd_err {
- u8 code;
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_flow_free_input (size:256b/32B) */
-struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0;
- __le32 flow_counter_id;
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_free_output (size:256b/32B) */
-struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_info_input (size:256b/32B) */
-struct hwrm_cfa_flow_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
- u8 unused_0[6];
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_info_output (size:5632b/704B) */
-struct hwrm_cfa_flow_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
- u8 profile;
- __le16 src_fid;
- __le16 dst_fid;
- __le16 l2_ctxt_id;
- __le64 em_info;
- __le64 tcam_info;
- __le64 vfp_tcam_info;
- __le16 ar_id;
- __le16 flow_handle;
- __le32 tunnel_handle;
- __le16 flow_timer;
- u8 unused_0[6];
- __le32 flow_key_data[130];
- __le32 flow_action_info[30];
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats_input (size:640b/80B) */
-struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- u8 unused_0[2];
- __le32 flow_id_0;
- __le32 flow_id_1;
- __le32 flow_id_2;
- __le32 flow_id_3;
- __le32 flow_id_4;
- __le32 flow_id_5;
- __le32 flow_id_6;
- __le32 flow_id_7;
- __le32 flow_id_8;
- __le32 flow_id_9;
-};
-
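hwrm_cfa_flow_stats batches up to ten flows per request: flow_handle_0..9 (or flow_id_0..9) go in, and the response returns packet_0..9 / byte_0..9 in the same positions. A rough sketch of pairing the counters back with the handles, assuming host-order copies of both messages; the struct mirrors below are simplified stand-ins, not the wire layout:

    #include <stdint.h>
    #include <stdio.h>

    struct flow_stats_req {                 /* simplified mirror of the input */
            uint16_t num_flows;
            uint16_t flow_handle[10];
    };

    struct flow_stats_resp {                /* simplified mirror of the output */
            uint64_t packets[10];
            uint64_t bytes[10];
    };

    static void report_flow_stats(const struct flow_stats_req *req,
                                  const struct flow_stats_resp *resp)
    {
            /* counters come back in the same slot as the handle that was sent */
            for (uint16_t i = 0; i < req->num_flows && i < 10; i++)
                    printf("flow 0x%04x: %llu pkts, %llu bytes\n",
                           req->flow_handle[i],
                           (unsigned long long)resp->packets[i],
                           (unsigned long long)resp->bytes[i]);
    }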
-/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
-struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le16 flow_hits;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
- char vfr_name[32];
-};
-
-/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free_input (size:448b/56B) */
-struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_vfr_free_output (size:128b/16B) */
-struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
-struct hwrm_cfa_eem_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
- __le32 unused_0;
- __le32 supported;
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
- __le32 max_entries_supported;
- __le16 key_entry_size;
- __le16 record_entry_size;
- __le16 efc_entry_size;
- __le16 fid_entry_size;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
-struct hwrm_cfa_eem_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
- __le16 group_id;
- __le16 unused_0;
- __le32 num_entries;
- __le32 unused_1;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- __le16 unused_2;
- __le32 unused_3;
-};
-
-/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
-struct hwrm_cfa_eem_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
-struct hwrm_cfa_eem_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 num_entries;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- u8 unused_2[5];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_op_input (size:192b/24B) */
-struct hwrm_cfa_eem_op_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
- __le16 unused_0;
- __le16 op;
- #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
- #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
- #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
- #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
- #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
-};
-
-/* hwrm_cfa_eem_op_output (size:128b/16B) */
-struct hwrm_cfa_eem_op_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[4];
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
- u8 tunnel_next_proto;
- u8 unused_0[6];
-};
-
-/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 upar_in_use;
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 status;
- #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
- #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
- u8 tunnel_next_proto;
- __be16 tunnel_dst_port_val;
- u8 unused_0[4];
-};
-
-/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 error_info;
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
- u8 upar_in_use;
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 unused_0[3];
- u8 valid;
-};
-
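upar_in_use in the allocation response is a plain bitmap, one bit per UPAR parser (UPAR0..UPAR7 above). A tiny sketch of counting how many parsers are already claimed; the helper name is illustrative:

    #include <stdint.h>

    /* one bit per UPAR0..UPAR7, as in TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_* */
    static int upars_in_use(uint8_t upar_in_use)
    {
            return __builtin_popcount(upar_in_use);
    }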
-/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
- u8 tunnel_next_proto;
- __le16 tunnel_dst_port_id;
- u8 unused_0[4];
-};
-
-/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 error_info;
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
- u8 unused_1[6];
- u8 valid;
-};
-
-/* ctx_hw_stats (size:1280b/160B) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* ctx_hw_stats_ext (size:1408b/176B) */
-struct ctx_hw_stats_ext {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
-};
-
-/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
-struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
- u8 unused_0;
- __le16 stats_dma_length;
- __le16 flags;
- #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
- __le16 steering_tag;
- __le32 stat_ctx_id;
- __le16 alloc_seq_id;
- u8 unused_1[6];
-};
-
-/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
-struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free_input (size:192b/24B) */
-struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_free_output (size:128b/16B) */
-struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ctx_query_output (size:1408b/176B) */
-struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_error_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ext_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
-struct hwrm_stat_ext_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
-struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
-struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_pcie_qstats_input (size:256b/32B) */
-struct hwrm_pcie_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 pcie_stat_size;
- u8 unused_0[6];
- __le64 pcie_stat_host_addr;
-};
-
-/* hwrm_pcie_qstats_output (size:128b/16B) */
-struct hwrm_pcie_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 pcie_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* pcie_ctx_hw_stats (size:768b/96B) */
-struct pcie_ctx_hw_stats {
- __le64 pcie_pl_signal_integrity;
- __le64 pcie_dl_signal_integrity;
- __le64 pcie_tl_signal_integrity;
- __le64 pcie_link_integrity;
- __le64 pcie_tx_traffic_rate;
- __le64 pcie_rx_traffic_rate;
- __le64 pcie_tx_dllp_statistics;
- __le64 pcie_rx_dllp_statistics;
- __le64 pcie_equalization_time;
- __le32 pcie_ltssm_histogram[4];
- __le64 pcie_recovery_histogram;
-};
-
-/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
-struct pcie_ctx_hw_stats_v2 {
- __le64 pcie_pl_signal_integrity;
- __le64 pcie_dl_signal_integrity;
- __le64 pcie_tl_signal_integrity;
- __le64 pcie_link_integrity;
- __le64 pcie_tx_traffic_rate;
- __le64 pcie_rx_traffic_rate;
- __le64 pcie_tx_dllp_statistics;
- __le64 pcie_rx_dllp_statistics;
- __le64 pcie_equalization_time;
- __le32 pcie_ltssm_histogram[4];
- __le64 pcie_recovery_histogram;
- __le32 pcie_tl_credit_nph_histogram[8];
- __le32 pcie_tl_credit_ph_histogram[8];
- __le32 pcie_tl_credit_pd_histogram[8];
- __le32 pcie_cmpl_latest_times[4];
- __le32 pcie_cmpl_longest_time;
- __le32 pcie_cmpl_shortest_time;
- __le32 unused_0[2];
- __le32 pcie_cmpl_latest_headers[4][4];
- __le32 pcie_cmpl_longest_headers[4][4];
- __le32 pcie_cmpl_shortest_headers[4][4];
- __le32 pcie_wr_latency_histogram[12];
- __le32 pcie_wr_latency_all_normal_count;
- __le32 unused_1;
- __le64 pcie_posted_packet_count;
- __le64 pcie_non_posted_packet_count;
- __le64 pcie_other_packet_count;
- __le64 pcie_blocked_packet_count;
- __le64 pcie_cmpl_packet_count;
-};
-
-/* hwrm_stat_generic_qstats_input (size:256b/32B) */
-struct hwrm_stat_generic_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 generic_stat_size;
- u8 flags;
- #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 generic_stat_host_addr;
-};
-
-/* hwrm_stat_generic_qstats_output (size:128b/16B) */
-struct hwrm_stat_generic_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 generic_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* generic_sw_hw_stats (size:1472b/184B) */
-struct generic_sw_hw_stats {
- __le64 pcie_statistics_tx_tlp;
- __le64 pcie_statistics_rx_tlp;
- __le64 pcie_credit_fc_hdr_posted;
- __le64 pcie_credit_fc_hdr_nonposted;
- __le64 pcie_credit_fc_hdr_cmpl;
- __le64 pcie_credit_fc_data_posted;
- __le64 pcie_credit_fc_data_nonposted;
- __le64 pcie_credit_fc_data_cmpl;
- __le64 pcie_credit_fc_tgt_nonposted;
- __le64 pcie_credit_fc_tgt_data_posted;
- __le64 pcie_credit_fc_tgt_hdr_posted;
- __le64 pcie_credit_fc_cmpl_hdr_posted;
- __le64 pcie_credit_fc_cmpl_data_posted;
- __le64 pcie_cmpl_longest;
- __le64 pcie_cmpl_shortest;
- __le64 cache_miss_count_cfcq;
- __le64 cache_miss_count_cfcs;
- __le64 cache_miss_count_cfcc;
- __le64 cache_miss_count_cfcm;
- __le64 hw_db_recov_dbs_dropped;
- __le64 hw_db_recov_drops_serviced;
- __le64 hw_db_recov_dbs_recovered;
- __le64 hw_db_recov_oo_drop_count;
-};
-
-/* hwrm_fw_reset_input (size:192b/24B) */
-struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 host_idx;
- u8 flags;
- #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
- #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_fw_reset_output (size:128b/16B) */
-struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_qstatus_input (size:192b/24B) */
-struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
- u8 unused_0[7];
-};
-
-/* hwrm_fw_qstatus_output (size:128b/16B) */
-struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 nvm_option_action_status;
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_set_time_input (size:256b/32B) */
-struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
- #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
- u8 unused_1[4];
-};
-
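The request above carries a broken-down timestamp: year/month/day/hour/minute/second plus milliseconds and a zone code where 0 means UTC. A host-side sketch of filling those fields from the current time; the local struct is a simplified host-order mirror for illustration, not the wire structure itself:

    #include <stdint.h>
    #include <time.h>

    struct fw_set_time_fields {             /* simplified mirror of the request body */
            uint16_t year;                  /* e.g. 2024; 0 means unknown */
            uint8_t  month, day, hour, minute, second;
            uint16_t millisecond;
            uint16_t zone;                  /* 0 = UTC, 65535 = unknown */
    };

    static void fill_fw_time(struct fw_set_time_fields *t)
    {
            time_t now = time(NULL);
            struct tm tm;

            gmtime_r(&now, &tm);
            t->year        = (uint16_t)(tm.tm_year + 1900);
            t->month       = (uint8_t)(tm.tm_mon + 1);
            t->day         = (uint8_t)tm.tm_mday;
            t->hour        = (uint8_t)tm.tm_hour;
            t->minute      = (uint8_t)tm.tm_min;
            t->second      = (uint8_t)tm.tm_sec;
            t->millisecond = 0;
            t->zone        = 0;             /* FW_SET_TIME_REQ_ZONE_UTC */
    }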
-/* hwrm_fw_set_time_output (size:128b/16B) */
-struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_struct_hdr (size:128b/16B) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
- #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
- #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
- #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
- __le16 len;
- u8 version;
- #define STRUCT_HDR_VERSION_0 0x0UL
- #define STRUCT_HDR_VERSION_1 0x1UL
- #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- u8 unused_0[6];
-};
-
-/* hwrm_struct_data_dcbx_app (size:64b/8B) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
- u8 priority;
- u8 valid;
- u8 unused_0[3];
-};
-
-/* hwrm_fw_set_structured_data_input (size:256b/32B) */
-struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* hwrm_fw_set_structured_data_output (size:128b/16B) */
-struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_get_structured_data_input (size:256b/32B) */
-struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
- u8 count;
- u8 unused_0;
-};
-
-/* hwrm_fw_get_structured_data_output (size:128b/16B) */
-struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_livepatch_query_input (size:192b/24B) */
-struct hwrm_fw_livepatch_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 fw_target;
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
- u8 unused_0[7];
-};
-
-/* hwrm_fw_livepatch_query_output (size:640b/80B) */
-struct hwrm_fw_livepatch_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- char install_ver[32];
- char active_ver[32];
- __le16 status_flags;
- #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
- #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_livepatch_input (size:256b/32B) */
-struct hwrm_fw_livepatch_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 opcode;
- #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
- #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
- #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
- u8 fw_target;
- #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
- #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
- #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
- u8 loadtype;
- #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
- #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
- #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
- u8 flags;
- __le32 patch_len;
- __le64 host_addr;
-};
-
-/* hwrm_fw_livepatch_output (size:128b/16B) */
-struct hwrm_fw_livepatch_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
-struct hwrm_fw_livepatch_cmd_err {
- u8 code;
- #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
- u8 unused_0[7];
-};
-
-/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
-struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_exec_fwd_resp_output (size:128b/16B) */
-struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
-struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_reject_fwd_resp_output (size:128b/16B) */
-struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_resp_input (size:1024b/128B) */
-struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* hwrm_fwd_resp_output (size:128b/16B) */
-struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
-struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0[6];
- __le32 encap_async_event_cmpl[4];
-};
-
-/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
-struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query_input (size:128b/16B) */
-struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_temp_monitor_query_output (size:192b/24B) */
-struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 phy_temp;
- u8 om_temp;
- u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
- u8 temp2;
- u8 phy_temp2;
- u8 om_temp2;
- u8 warn_threshold;
- u8 critical_threshold;
- u8 fatal_threshold;
- u8 shutdown_threshold;
- u8 unused_0[4];
- u8 valid;
-};
-
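Each temperature byte in the response has a matching *_NOT_AVAILABLE (or *_NOT_PRESENT) bit in flags, so a reader is expected to check the flag before trusting the value. A sketch of that gating, assuming host-order fields and degrees Celsius; constants mirror the TEMP_MONITOR_QUERY_RESP_FLAGS_* defines and the reporting function is made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define TEMP_NOT_AVAILABLE      0x1u
    #define PHY_TEMP_NOT_AVAILABLE  0x2u
    #define OM_NOT_PRESENT          0x4u
    #define OM_TEMP_NOT_AVAILABLE   0x8u

    static void report_temps(uint8_t flags, uint8_t temp,
                             uint8_t phy_temp, uint8_t om_temp)
    {
            if (!(flags & TEMP_NOT_AVAILABLE))
                    printf("ASIC:   %u C\n", temp);
            if (!(flags & PHY_TEMP_NOT_AVAILABLE))
                    printf("PHY:    %u C\n", phy_temp);
            if (!(flags & (OM_NOT_PRESENT | OM_TEMP_NOT_AVAILABLE)))
                    printf("module: %u C\n", om_temp);
    }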
-/* hwrm_wol_filter_alloc_input (size:512b/64B) */
-struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
- u8 unused_0[5];
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- u8 unused_1[4];
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* hwrm_wol_filter_alloc_output (size:128b/16B) */
-struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_wol_filter_free_input (size:256b/32B) */
-struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
-};
-
-/* hwrm_wol_filter_free_output (size:128b/16B) */
-struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
-struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- u8 unused_0[4];
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1[6];
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- u8 unused_2[6];
-};
-
-/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
-struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
-struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- u8 unused_1[6];
-};
-
-/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
-struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
- u8 wol_pkt_len;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct_input (size:256b/32B) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* hwrm_dbg_read_direct_output (size:128b/16B) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 crc32;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcaps_input (size:192b/24B) */
-struct hwrm_dbg_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_dbg_qcaps_output (size:192b/24B) */
-struct hwrm_dbg_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_component_disable_caps;
- #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
- __le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
- #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
- #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
- #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
- #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcfg_input (size:192b/24B) */
-struct hwrm_dbg_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 flags;
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
- __le32 coredump_component_disable_flags;
- #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
-};
-
-/* hwrm_dbg_qcfg_output (size:256b/32B) */
-struct hwrm_dbg_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_size;
- __le32 flags;
- #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
- #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
- #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
- #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
- __le16 async_cmpl_ring;
- u8 unused_2[2];
- __le32 crashdump_size;
- u8 unused_3[3];
- u8 valid;
-};
-
-/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
-struct hwrm_dbg_crashdump_medium_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 output_dest_flags;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
- __le16 pg_size_lvl;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
- __le32 size;
- __le32 coredump_component_disable_flags;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
- __le32 unused_0;
- __le64 pbl;
-};
-
-/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
-struct hwrm_dbg_crashdump_medium_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* coredump_segment_record (size:128b/16B) */
-struct coredump_segment_record {
- __le16 component_id;
- __le16 segment_id;
- __le16 max_instances;
- u8 version_hi;
- u8 version_low;
- u8 seg_flags;
- u8 compress_flags;
- #define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[2];
- __le32 segment_len;
-};
-
-/* hwrm_dbg_coredump_list_input (size:256b/32B) */
-struct hwrm_dbg_coredump_list_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le16 seq_no;
- u8 flags;
- #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
- u8 unused_0[1];
-};
-
-/* hwrm_dbg_coredump_list_output (size:128b/16B) */
-struct hwrm_dbg_coredump_list_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 total_segments;
- __le16 data_len;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
-struct hwrm_dbg_coredump_initiate_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_0;
- u8 seg_flags;
- #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
- #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
- #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
- u8 unused_1[7];
-};
-
-/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
-struct hwrm_dbg_coredump_initiate_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* coredump_data_hdr (size:128b/16B) */
-struct coredump_data_hdr {
- __le32 address;
- __le32 flags_length;
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
- __le32 instance;
- __le32 next_offset;
-};
-
-/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
-struct hwrm_dbg_coredump_retrieve_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le32 unused_0;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_1;
- u8 seg_flags;
- u8 unused_2;
- __le16 unused_3;
- __le32 unused_4;
- __le32 seq_no;
- __le32 unused_5;
-};
-
-/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
-struct hwrm_dbg_coredump_retrieve_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 data_len;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
- u8 unused_0[3];
- __le32 fw_ring_id;
-};
-
-/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 producer_index;
- __le32 consumer_index;
- __le32 cag_vector_ctrl;
- __le16 st_tag;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
-struct hwrm_dbg_log_buffer_flush_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE
- u8 unused_1[2];
- __le32 flags;
- #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
-};
-
-/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
-struct hwrm_dbg_log_buffer_flush_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 current_buffer_offset;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_nvm_read_input (size:320b/40B) */
-struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0[2];
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_read_output (size:128b/16B) */
-struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
-struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
-};
-
-/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
-struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
-struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_write_input (size:448b/56B) */
-struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
- #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
- __le32 dir_item_length;
- __le32 offset;
- __le32 len;
- __le32 unused_0;
-};
-
-/* hwrm_nvm_write_output (size:128b/16B) */
-struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_nvm_write_cmd_err (size:64b/8B) */
-struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_modify_input (size:320b/40B) */
-struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- __le16 flags;
- #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
- #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_modify_output (size:128b/16B) */
-struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
- u8 unused_0[3];
-};
-
-/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
-struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- u8 unused_0[6];
-};
-
-/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
-struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
-struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 nvm_cfg_ver_maj;
- u8 nvm_cfg_ver_min;
- u8 nvm_cfg_ver_upd;
- u8 flags;
- #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
- char pkg_name[16];
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- __le16 srt2_fw_major;
- __le16 srt2_fw_minor;
- __le16 srt2_fw_build;
- __le16 srt2_fw_patch;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update_input (size:192b/24B) */
-struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_verify_update_output (size:128b/16B) */
-struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_input (size:192b/24B) */
-struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_install_update_output (size:192b/24B) */
-struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
-struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_get_variable_input (size:320b/40B) */
-struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_get_variable_output (size:128b/16B) */
-struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_set_variable_input (size:320b/40B) */
-struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
- #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_set_variable_output (size:128b/16B) */
-struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_qlist_input (size:128b/16B) */
-struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_qlist_output (size:2240b/280B) */
-struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1[2];
- char test_name[8][32];
- u8 eyescope_target_BER_support;
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
- u8 unused_2[6];
- u8 valid;
-};
-
-/* hwrm_selftest_exec_input (size:192b/24B) */
-struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_exec_output (size:128b/16B) */
-struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_selftest_irq_input (size:128b/16B) */
-struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_irq_output (size:128b/16B) */
-struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* dbc_dbc (size:64b/8B) */
-struct dbc_dbc {
- __le32 index;
- #define DBC_DBC_INDEX_MASK 0xffffffUL
- #define DBC_DBC_INDEX_SFT 0
- #define DBC_DBC_EPOCH 0x1000000UL
- #define DBC_DBC_TOGGLE_MASK 0x6000000UL
- #define DBC_DBC_TOGGLE_SFT 25
- __le32 type_path_xid;
- #define DBC_DBC_XID_MASK 0xfffffUL
- #define DBC_DBC_XID_SFT 0
- #define DBC_DBC_PATH_MASK 0x3000000UL
- #define DBC_DBC_PATH_SFT 24
- #define DBC_DBC_PATH_ROCE (0x0UL << 24)
- #define DBC_DBC_PATH_L2 (0x1UL << 24)
- #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
- #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
- #define DBC_DBC_VALID 0x4000000UL
- #define DBC_DBC_DEBUG_TRACE 0x8000000UL
- #define DBC_DBC_TYPE_MASK 0xf0000000UL
- #define DBC_DBC_TYPE_SFT 28
- #define DBC_DBC_TYPE_SQ (0x0UL << 28)
- #define DBC_DBC_TYPE_RQ (0x1UL << 28)
- #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
- #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
- #define DBC_DBC_TYPE_CQ (0x4UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
- #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
- #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
- #define DBC_DBC_TYPE_NQ (0xaUL << 28)
- #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
- #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
- #define DBC_DBC_TYPE_NULL (0xfUL << 28)
- #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
-};
-
-/* db_push_start (size:64b/8B) */
-struct db_push_start {
- u64 db;
- #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
- #define DB_PUSH_START_DB_INDEX_SFT 0
- #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
- #define DB_PUSH_START_DB_PI_LO_SFT 24
- #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
- #define DB_PUSH_START_DB_XID_SFT 32
- #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
- #define DB_PUSH_START_DB_PI_HI_SFT 52
- #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
- #define DB_PUSH_START_DB_TYPE_SFT 60
- #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
- #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
- #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
-};
-
-/* db_push_end (size:64b/8B) */
-struct db_push_end {
- u64 db;
- #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
- #define DB_PUSH_END_DB_INDEX_SFT 0
- #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
- #define DB_PUSH_END_DB_PI_LO_SFT 24
- #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
- #define DB_PUSH_END_DB_XID_SFT 32
- #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
- #define DB_PUSH_END_DB_PI_HI_SFT 52
- #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
- #define DB_PUSH_END_DB_PATH_SFT 56
- #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
- #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
- #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
- #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
- #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
- #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
- #define DB_PUSH_END_DB_TYPE_SFT 60
- #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
- #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
- #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
-};
-
-/* db_push_info (size:64b/8B) */
-struct db_push_info {
- u32 push_size_push_index;
- #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
- #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
- #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
- #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
- u32 reserved32;
-};
-
-/* fw_status_reg (size:32b/4B) */
-struct fw_status_reg {
- u32 fw_status;
- #define FW_STATUS_REG_CODE_MASK 0xffffUL
- #define FW_STATUS_REG_CODE_SFT 0
- #define FW_STATUS_REG_CODE_READY 0x8000UL
- #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
- #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
- #define FW_STATUS_REG_RECOVERABLE 0x20000UL
- #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
- #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
- #define FW_STATUS_REG_SHUTDOWN 0x100000UL
- #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
- #define FW_STATUS_REG_RECOVERING 0x400000UL
- #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
-};
-
-/* hcomm_status (size:64b/8B) */
-struct hcomm_status {
- u32 sig_ver;
- #define HCOMM_STATUS_VER_MASK 0xffUL
- #define HCOMM_STATUS_VER_SFT 0
- #define HCOMM_STATUS_VER_LATEST 0x1UL
- #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
- #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
- #define HCOMM_STATUS_SIGNATURE_SFT 8
- #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
- #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
- u32 fw_status_loc;
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
- #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
- #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
-};
-#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
-
-#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
index 669d24ba0e87..de3427c6c6aa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -12,8 +12,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_hwmon.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index d2fd2d04ed47..5ce190f50120 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -20,8 +20,8 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index fb5f5b063c3d..791b3a0cdb83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -10,7 +10,7 @@
#ifndef BNXT_HWRM_H
#define BNXT_HWRM_H
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
enum bnxt_hwrm_ctx_flags {
/* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 0669d43472f5..ca660e6d28a4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -15,7 +15,7 @@
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
@@ -702,7 +702,7 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp)
(BNXT_PTP_GRC_WIN - 1) * 4);
}
-static u64 bnxt_cc_read(const struct cyclecounter *cc)
+static u64 bnxt_cc_read(struct cyclecounter *cc)
{
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
u64 ns = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ddddd89052f..480e18a32caa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -823,7 +823,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
- /* Check if we can enable requested num of vf's. At a mininum
+ /* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
* features like TPA will not be available.
*/
@@ -1125,7 +1125,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
/* There are two cases:
* 1.If firmware spec < 0x10202,VF MAC address is not forwarded
* to the PF and so it doesn't have to match
- * 2.Allow VF to modify it's own MAC when PF has not assigned a
+ * 2.Allow VF to modify its own MAC when PF has not assigned a
* valid MAC address and firmware spec >= 0x10202
*/
mac_ok = true;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d2ca90407cce..d72fd248f3aa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -19,8 +19,8 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
@@ -1316,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds. Ignore src_port in the tunnel_key,
+ * tunnel header fields. Ignore src_port in the tunnel_key,
* since it is not required for decap filters.
*/
decap_key->tp_src = 0;
@@ -1410,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 2450a369b792..61cf201bb0dc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -21,8 +21,8 @@
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 619f0844e778..bd116fd578d8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -12,8 +12,8 @@
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4a6d8cb9f970..58d579dca3f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -17,7 +17,7 @@
#include <linux/filter.h>
#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_xdp.h"
@@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
- dma_unmap_len_set(tx_buf, len, 0);
+ dma_unmap_len_set(tx_buf, len, len);
}
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fa0077bc67b7..98971ae4f87d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2472,10 +2472,8 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
bcmgenet_rx_ring_int_enable(ring);
- }
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
@@ -3988,6 +3986,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
if (priv->wol_irq > 0) {
@@ -4092,6 +4092,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
+ /* Initialize u64 stats seq counter for 32bit machines */
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
+ u64_stats_init(&priv->rx_rings[i].stats64.syncp);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
+ u64_stats_init(&priv->tx_rings[i].stats64.syncp);
+
/* libphy will determine the link state */
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b6437ba7a2eb..573e8b279e52 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -169,10 +169,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
+ EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
+
reg |= EXT_CK25_DIS;
}
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 91104cc2c238..b4dc93a48718 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6686,7 +6686,7 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* We only need to fill in the address because the other members
* of the RX descriptor are invariant, see tg3_init_rings.
*
- * Note the purposeful assymetry of cpu vs. chip accesses. For
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
* posting buffers we only dirty the first cache line of the RX
* descriptor (containing the address). Whereas for the RX status
* buffers the cpu only reads the last cacheline of the RX descriptor
@@ -10145,7 +10145,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
/* Pseudo-header checksum is done by hardware logic and not
- * the offload processers, so make the chip do the pseudo-
+ * the offload processors, so make the chip do the pseudo-
* header checksums on receive. For transmit it is more
* convenient to do the pseudo-header checksum in software
* as Linux does that on transmit for us in all cases.
@@ -16610,7 +16610,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCIX_TARGET_HWBUG);
- /* The chip can have it's power management PCI config
+ /* The chip can have its power management PCI config
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index b473f8014d9c..a9e7f88fa26d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2390,7 +2390,7 @@
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
-/* Fast Ethernet Tranceiver definitions */
+/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
#define MII_TG3_FET_PTEST_TRIM_2 0x0002
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d1f1ae5ea161..ce95fad8cedd 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4109,8 +4109,12 @@ static const struct net_device_ops macb_netdev_ops = {
static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
+ struct device_node *np = bp->pdev->dev.of_node;
+ bool refclk_ext;
u32 dcfg;
+ refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
+
if (dt_conf)
bp->caps = dt_conf->caps;
@@ -4141,6 +4145,9 @@ static void macb_configure_caps(struct macb *bp,
}
}
+ if (refclk_ext)
+ bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
+
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
@@ -5096,6 +5103,7 @@ static const struct macb_config mpfs_config = {
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
@@ -5105,8 +5113,7 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -5654,6 +5661,20 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
return 0;
}
+static void macb_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ dev_close(netdev);
+
+ netif_device_detach(netdev);
+
+ rtnl_unlock();
+}
+
static const struct dev_pm_ops macb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
@@ -5667,6 +5688,7 @@ static struct platform_driver macb_driver = {
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
+ .shutdown = macb_shutdown,
};
module_platform_driver(macb_driver);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 984f0dd7b62e..61e261657073 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -209,7 +209,7 @@ static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
return -EOPNOTSUPP;
}
-static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+static u64 cavium_ptp_cc_read(struct cyclecounter *cc)
{
struct cavium_ptp *clock =
container_of(cc, struct cavium_ptp, cycle_counter);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index ff8f2f9f9cae..75f22f74774c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1208,45 +1208,6 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx)
-{
- if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_IQ_MAX_Q(conf23xx),
- CN23XX_MAX_INPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_OQ_MAX_Q(conf23xx),
- CN23XX_MAX_OUTPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
- CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
- __func__);
- return 1;
- }
-
- if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- return 0;
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 234b96b4f488..bbe9f3133b07 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -54,9 +54,6 @@ struct oct_vf_stats {
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx);
-
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
int cn23xx_sriov_config(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 5b4cb725f60f..953edf0c7096 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -157,7 +157,7 @@ err_release_region:
response of the request.
* 0: the request will wait until its response gets back
* from the firmware within LIO_SC_MAX_TMO_MS milli sec.
- * It the response does not return within
+ * If the response does not return within
* LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list()
* will move the request to zombie response list.
*
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 87dd6f89ce51..c139fc423764 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -268,7 +268,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
@@ -278,7 +278,7 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
 * @param nctrl - control structure with command, timeout, and callback info
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d0ff0c170b1a..fc6053414b7d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -516,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev,
return 0;
}
-static int nicvf_get_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
info->data = 0;
@@ -552,25 +552,28 @@ static int nicvf_get_rxnfc(struct net_device *dev,
info->data = nic->rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- return nicvf_get_rss_hash_opts(nic, info);
default:
break;
}
return ret;
}
-static int nicvf_set_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
- struct nicvf_rss_info *rss = &nic->rss_info;
- u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss;
+ u64 rss_cfg;
+
+ rss = &nic->rss_info;
+ rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
if (!rss->enable)
netdev_err(nic->netdev,
"RSS is disabled, hash cannot be set\n");
- netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n",
info->flow_type, info->data);
if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
@@ -628,19 +631,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct nicvf *nic = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return nicvf_set_rss_hash_opts(nic, info);
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
return RSS_HASH_KEY_SIZE * sizeof(u64);
@@ -872,11 +862,12 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
.get_rxnfc = nicvf_get_rxnfc,
- .set_rxnfc = nicvf_set_rxnfc,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
.set_rxfh = nicvf_set_rxfh,
+ .get_rxfh_fields = nicvf_get_rxfh_fields,
+ .set_rxfh_fields = nicvf_set_rxfh_fields,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aebb9fef3f6e..1be2dc40a1a6 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1578,7 +1578,6 @@ napi_del:
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
- int orig_mtu = netdev->mtu;
/* For now just support only the usual MTU sized frames,
* plus some headroom for VLAN, QinQ.
@@ -1589,15 +1588,10 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- WRITE_ONCE(netdev->mtu, new_mtu);
-
- if (!netif_running(netdev))
- return 0;
-
- if (nicvf_update_hw_max_frs(nic, new_mtu)) {
- netdev->mtu = orig_mtu;
+ if (netif_running(netdev) && nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- }
+
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 3b7ad744b2dd..21495b5dce25 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1429,9 +1429,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
{
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
struct bgx *bgx = context;
- char bgx_sel[5];
+ char bgx_sel[7];
- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
pr_warn("Invalid link device\n");
return AE_OK;
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index cbfa03d5663a..f3ada6e7cdc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -141,7 +141,7 @@ static int pm3393_interrupt_enable(struct cmac *cmac)
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
@@ -179,7 +179,7 @@ static int pm3393_interrupt_disable(struct cmac *cmac)
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
@@ -222,7 +222,7 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT
+ /* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
@@ -756,7 +756,7 @@ static int pm3393_mac_reset(adapter_t * adapter)
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
- /* Has the XAUI MABC PLL circuitry stablized? */
+ /* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 9749d1239f58..5d5f3380ecca 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -176,43 +176,6 @@ again:
EXPORT_SYMBOL(t3_l2t_send_slow);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
-{
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- neigh_event_send(e->neigh, NULL);
- spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_STALE) {
- e->state = L2T_STATE_VALID;
- }
- spin_unlock_bh(&e->lock);
- return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return;
- case L2T_STATE_RESOLVING:
- spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
- spin_unlock_bh(&e->lock);
- goto again;
- }
- spin_unlock_bh(&e->lock);
-
- /*
- * Only the first packet added to the arpq should kick off
- * resolution. However, because the alloc_skb below can fail,
- * we allow each packet added to the arpq to retry resolution
- * as a way of recovering from transient memory exhaustion.
- * A better way would be to use a work request to retry L2T
- * entries when there's no memory.
- */
- neigh_event_send(e->neigh, NULL);
- }
-}
-
-EXPORT_SYMBOL(t3_l2t_send_event);
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 646ca0bc25bd..33558f177497 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 95e6f015a6af..0d85198fb03d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1316,7 +1316,7 @@ struct ch_sched_flowc {
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
- * matches an ingress packet when all of the individual individual field
+ * matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid, however, while it may be easy to
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 1546c3db08f0..23326235d4ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1730,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev,
return 0;
}
+static int cxgb4_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1739,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
@@ -2199,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
+ .get_rxfh_fields = cxgb4_get_rxfh_fields,
.self_test = cxgb4_self_test,
.flash_device = set_flash,
.get_ts_info = get_ts_info,
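
Editor's note: this hunk moves the ETHTOOL_GRXFH handling out of get_rxnfc() and into the dedicated .get_rxfh_fields ethtool operation; the enic, be2net, dpaa, dpaa2 and enetc diffs further down make the same conversion. A minimal sketch of the callback shape (hypothetical driver, names invented for illustration):

static int foo_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *info)
{
	/* Report which header fields feed the RSS hash for info->flow_type */
	info->data = 0;
	if (info->flow_type == TCP_V4_FLOW)
		info->data = RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
	return 0;
}

/* hooked up in the driver's ethtool_ops:
 *	.get_rxfh_fields = foo_get_rxfh_fields,
 */
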
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 51395c96b2e9..392723ef14e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3297,7 +3297,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
}
if (max_tx_rate == 0) {
- /* unbind VF to to any Traffic Class */
+ /* unbind VF to any Traffic Class */
fw_pfvf =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
@@ -4816,7 +4816,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
goto bye;
}
- /* Get FW from from /lib/firmware/ */
+ /* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
adap->pdev_dev);
if (ret < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a5d2f84dcdd5..8524246fd67e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -186,7 +186,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
@@ -422,7 +422,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 64402e3646b3..9fccb8ea9bcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -163,7 +163,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
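
Editor's note: the comment above relies on a buffer aligned to 32 bytes having its low 5 bits clear, so those bits can carry software-only state that is masked off before the address reaches hardware. A generic sketch of that trick (illustrative names, not from this driver):

#include <linux/types.h>

#define FL_STATE_MASK	0x1fUL	/* low 5 bits free on 32-byte aligned buffers */

static inline dma_addr_t fl_hw_addr(dma_addr_t stored)
{
	return stored & ~FL_STATE_MASK;		/* address given to hardware */
}

static inline unsigned int fl_sw_state(dma_addr_t stored)
{
	return stored & FL_STATE_MASK;		/* driver-private flag bits */
}
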
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 175bf9b13058..171750fad44f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9348,7 +9348,7 @@ int t4_init_devlog_params(struct adapter *adap)
return 0;
}
- /* Otherwise, ask the firmware for it's Device Log Parameters.
+ /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 4e6ecb9c8dcc..31fab2415743 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2191,7 +2191,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
 * @intr_dest: MSI-X vector index (overridden in MSI mode)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1c52592d3b65..56fcc531af2e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -706,7 +706,7 @@ int t4vf_fl_pkt_align(struct adapter *adapter)
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
+ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index d567e42e1760..465fa8077964 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1096,8 +1096,7 @@ new_buf:
copy = size;
if (msg->msg_flags & MSG_SPLICE_PAGES) {
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0) {
if (err == -EMSGSIZE)
goto new_buf;
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 529160926a96..a50f5dad34d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -528,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
-static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+static int enic_get_rx_flow_hash(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct enic *enic = netdev_priv(dev);
u8 rss_hash_type = 0;
cmd->data = 0;
@@ -597,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
- case ETHTOOL_GRXFH:
- ret = enic_get_rx_flow_hash(enic, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
@@ -693,6 +692,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_rxfh_fields = enic_get_rx_flow_hash,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
.get_channels = enic_get_channels,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 773f5ad972a2..6bc8dfdb3d4b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1864,10 +1864,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (netdev->mtu > enic->port_mtu)
+ if (new_mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ new_mtu, enic->port_mtu);
return _enic_change_mtu(netdev, new_mtu);
}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 5b7e6eb080f3..b608585f1954 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1550,7 +1550,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0;
- /* Ensure our media table fixup get's applied */
+ /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
}
#endif
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 8759f9f76b62..e5d2ede13845 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -143,7 +143,7 @@ static const struct pci_device_id xircom_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
-static struct pci_driver xircom_ops = {
+static struct pci_driver xircom_driver = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
@@ -1169,4 +1169,4 @@ investigate_write_descriptor(struct net_device *dev,
}
}
-module_pci_driver(xircom_ops);
+module_pci_driver(xircom_driver);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index da9b7715df05..cc60ee454bf9 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = rio_tx_timeout,
};
+static bool is_support_rmon_mmio(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_DLINK &&
+ pdev->device == 0x4000 &&
+ pdev->revision == 0x0c;
+}
+
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -131,18 +138,22 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
+ if (is_support_rmon_mmio(pdev))
+ np->rmon_enable = true;
+
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
-#ifdef MEM_MAPPING
- /* MM registers range. */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (!ioaddr)
- goto err_out_iounmap;
-#endif
+ if (np->rmon_enable) {
+ /* MM registers range. */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (!ioaddr)
+ goto err_out_iounmap;
+ }
+
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
@@ -289,9 +300,8 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap:
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
@@ -578,7 +588,8 @@ static void rio_hw_init(struct net_device *dev)
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
- dw32(RmonStatMask, 0x0007ffff);
+ if (!np->rmon_enable)
+ dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
@@ -1076,9 +1087,6 @@ get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
unsigned int stat_reg;
unsigned long flags;
@@ -1123,10 +1131,10 @@ get_stats (struct net_device *dev)
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
+
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1143,9 +1151,6 @@ clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1181,10 +1186,9 @@ clear_stats (struct net_device *dev)
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1810,9 +1814,8 @@ rio_remove1 (struct pci_dev *pdev)
np->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba679025e866..4788cc94639d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -403,6 +403,8 @@ struct netdev_private {
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
+
+ bool rmon_enable;
};
/* The station address location in the EEPROM. */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f001a649f58f..f9216326bdfe 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1073,10 +1073,19 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
-static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+static int be_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u64 flow_type = cmd->flow_type;
u64 data = 0;
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
switch (flow_type) {
case TCP_V4_FLOW:
if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
@@ -1104,7 +1113,8 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
break;
}
- return data;
+ cmd->data = data;
+ return 0;
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
@@ -1119,9 +1129,6 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_qs;
break;
@@ -1132,11 +1139,19 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
return 0;
}
-static int be_set_rss_hash_opts(struct be_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int be_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- int status;
+ struct be_adapter *adapter = netdev_priv(netdev);
u32 rss_flags = adapter->rss_info.rss_flags;
+ int status;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1195,28 +1210,6 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return be_cmd_status(status);
}
-static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!be_multi_rxq(adapter)) {
- dev_err(&adapter->pdev->dev,
- "ethtool::set_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- status = be_set_rss_hash_opts(adapter, cmd);
- break;
- default:
- return -EINVAL;
- }
-
- return status;
-}
-
static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -1449,7 +1442,8 @@ const struct ethtool_ops be_ethtool_ops = {
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
- .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_fields = be_get_rxfh_fields,
+ .set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d2e21592119..cb004fd16252 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1465,10 +1465,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
ntohs(tcphdr->source));
dev_info(dev, "TCP dest port %d\n",
ntohs(tcphdr->dest));
- dev_info(dev, "TCP sequence num %d\n",
- ntohs(tcphdr->seq));
- dev_info(dev, "TCP ack_seq %d\n",
- ntohs(tcphdr->ack_seq));
+ dev_info(dev, "TCP sequence num %u\n",
+ ntohl(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %u\n",
+ ntohl(tcphdr->ack_seq));
} else if (ip_hdr(skb)->protocol ==
IPPROTO_UDP) {
udphdr = udp_hdr(skb);
@@ -4031,8 +4031,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
static const struct udp_tunnel_nic_info be_udp_tunnels = {
.set_port = be_vxlan_set_port,
.unset_port = be_vxlan_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
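
Editor's note: the first be_main.c hunk above fixes the debug print of tcphdr->seq/ack_seq. Those are 32-bit big-endian fields, so ntohs() was byte-swapping only half of them and %d could render large values as negative. A tiny user-space illustration of the %u/%d half of the fix (not kernel code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A sequence number above 2^31, as it would appear on the wire */
	uint32_t seq_be = htonl(3000000000u);

	printf("%u\n", ntohl(seq_be));		/* 3000000000: full 32-bit swap, unsigned */
	printf("%d\n", (int)ntohl(seq_be));	/* typically prints a negative number */
	return 0;
}
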
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a98d5af3f9e3..5d0c0906878d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -101,6 +102,8 @@ struct ftgmac100 {
/* AST2500/AST2600 RMII ref clock gate */
struct clk *rclk;
+ /* Aspeed reset control */
+ struct reset_control *rst;
/* Link management */
int cur_speed;
@@ -148,6 +151,23 @@ static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
u32 maccr = 0;
+ /* Aspeed RMII needs SCU reset to clear status */
+ if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
+ return err;
+ }
+ usleep_range(10000, 20000);
+ err = reset_control_deassert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
+ return err;
+ }
+ }
+
switch (priv->cur_speed) {
case SPEED_10:
case 0: /* no link */
@@ -1428,7 +1448,7 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
/* Disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* Release phy lock to allow ftgmac100_reset to aquire it, keeping lock
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
* order consistent to prevent dead lock.
*/
if (netdev->phydev)
@@ -1968,6 +1988,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
+ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ goto err_phy_connect;
+ }
+
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
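
Editor's note: one detail in the ftgmac100 change above is that the reset line is requested with the optional getter, so on boards whose device tree has no "resets" property priv->rst is NULL and the assert/deassert calls become no-ops. A condensed sketch of that idiom (generic names, not from the patch):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/reset.h>

static int maybe_pulse_reset(struct device *dev)
{
	struct reset_control *rst;
	int err;

	/* Returns NULL (not an error) when no reset is described for the device */
	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	err = reset_control_assert(rst);	/* no-op, returns 0, if rst is NULL */
	if (err)
		return err;
	usleep_range(10000, 20000);
	return reset_control_deassert(rst);
}
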
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 23c23cca2620..3edc8d142dd5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
-#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
@@ -3150,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 9986f6e1f587..0c588e03b15e 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -263,8 +263,8 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
ethtool_puts(&data, dpaa_stats_global[i]);
}
-static int dpaa_get_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *cmd)
+static int dpaa_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -299,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev,
return 0;
}
-static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *unused)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = dpaa_get_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
struct mac_device *mac_dev;
@@ -329,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable)
priv->keygen_in_use = enable;
}
-static int dpaa_set_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dpaa_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
int ret = -EINVAL;
@@ -364,21 +349,6 @@ static int dpaa_set_hash_opts(struct net_device *dev,
return ret;
}
-static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = dpaa_set_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int dpaa_get_ts_info(struct net_device *net_dev,
struct kernel_ethtool_ts_info *info)
{
@@ -510,8 +480,8 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
+ .get_rxfh_fields = dpaa_get_rxfh_fields,
+ .set_rxfh_fields = dpaa_set_rxfh_fields,
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2ec2c3dab250..0f4efd505332 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -3939,6 +3939,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
MEM_TYPE_PAGE_ORDER0, NULL);
if (err) {
dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
return err;
}
@@ -4432,17 +4433,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
return -EINVAL;
}
if (err)
- return err;
+ goto out;
}
err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &priv->tx_qdid);
if (err) {
dev_err(dev, "dpni_get_qdid() failed\n");
- return err;
+ goto out;
}
return 0;
+
+out:
+ while (i--) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+ return err;
}
/* Allocate rings for storing incoming frame descriptors */
@@ -4657,12 +4666,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
return PTR_ERR(dpmac_dev);
}
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = priv->mc_io;
@@ -4696,6 +4712,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
@@ -4825,6 +4843,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+}
+
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
@@ -5028,6 +5057,7 @@ err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
err_bind:
dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
@@ -5080,6 +5110,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
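
Editor's note: the dpaa2_eth_bind_dpni() and probe/remove changes above add a backwards-walking unwind so that a failure part-way through setup tears down only what was actually registered. A self-contained sketch of that pattern (hypothetical item/setup/teardown names standing in for the xdp_rxq registration calls):

#include <linux/types.h>

struct item {
	bool ready;
};

static int item_setup(struct item *it)
{
	it->ready = true;	/* stand-in for xdp_rxq_info_reg() etc. */
	return 0;
}

static void item_teardown(struct item *it)
{
	it->ready = false;	/* stand-in for xdp_rxq_info_unreg() */
}

static int setup_all(struct item *items, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = item_setup(&items[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)		/* only indices below the failure point were set up */
		item_teardown(&items[i]);
	return err;
}
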
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 74ef77cb7078..00474ed11d53 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -719,13 +719,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
int i, j = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
@@ -767,11 +760,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
int err = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_SRXFH:
- if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
- return -EOPNOTSUPP;
- err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
- break;
case ETHTOOL_SRXCLSRLINS:
err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
@@ -785,6 +773,28 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *rxnfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ return 0;
+}
+
+static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev,
+ const struct ethtool_rxfh_fields *rxnfc,
+ struct netlink_ext_ack *extack)
+{
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ return dpaa2_eth_set_hash(net_dev, rxnfc->data);
+}
+
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
@@ -939,6 +949,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_rxfh_fields = dpaa2_eth_get_rxfh_fields,
+ .set_rxfh_fields = dpaa2_eth_set_rxfh_fields,
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 147a93bf9fa9..4643a3380618 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1448,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = port_priv->ethsw_data->mc_io;
@@ -1483,6 +1490,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index a466c2379146..4b0ae7d9af92 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -448,7 +448,5 @@ bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
- xsk_tx_release(ch->xsk_pool);
-
return total_enqueued == budget;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index dcc3fbac3481..e4287725832e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1375,6 +1375,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
}
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ struct enetc_hw *hw = &priv->si->hw;
__be16 tpid = 0;
switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
@@ -1385,15 +1386,12 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
tpid = htons(ETH_P_8021AD);
break;
case 2:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR1));
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
+ SICVLANR_ETYPE);
break;
case 3:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR2));
- break;
- default:
- break;
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
+ SICVLANR_ETYPE);
}
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 872d2cbd088b..62e8ee4d2f04 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -96,17 +96,17 @@ struct enetc_rx_swbd {
#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
struct enetc_ring_stats {
- unsigned int packets;
- unsigned int bytes;
- unsigned int rx_alloc_errs;
- unsigned int xdp_drops;
- unsigned int xdp_tx;
- unsigned int xdp_tx_drops;
- unsigned int xdp_redirect;
- unsigned int xdp_redirect_failures;
- unsigned int recycles;
- unsigned int recycle_failures;
- unsigned int win_drop;
+ unsigned long packets;
+ unsigned long bytes;
+ unsigned long rx_alloc_errs;
+ unsigned long xdp_drops;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_drops;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_failures;
+ unsigned long recycles;
+ unsigned long recycle_failures;
+ unsigned long win_drop;
};
struct enetc_xdp_data {
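
Editor's note: one plausible reason for widening enetc_ring_stats from unsigned int to unsigned long is wrap-around headroom; rough arithmetic for scale:

/* 2^32 bytes is about 4.29 GB; at 10 Gb/s (roughly 1.25 GB/s) a 32-bit
 * byte counter wraps in about 3.4 seconds, while a 64-bit counter at the
 * same rate would take centuries.
 */
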
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index d38cd36be4a6..961e76cd8489 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -142,7 +142,7 @@ static const struct {
static const struct {
int reg;
char name[ETH_GSTRING_LEN] __nonstring;
-} enetc_port_counters[] = {
+} enetc_pm_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
{ ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
@@ -194,6 +194,12 @@ static const struct {
{ ENETC_PM_TSCOL(0), "MAC tx single collisions" },
{ ENETC_PM_TLCOL(0), "MAC tx late collisions" },
{ ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_port_counters[] = {
{ ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
{ ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
{ ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
@@ -240,6 +246,7 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
return len;
len += ARRAY_SIZE(enetc_port_counters);
+ len += ARRAY_SIZE(enetc_pm_counters);
return len;
}
@@ -266,6 +273,9 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
ethtool_cpy(&data, enetc_port_counters[i].name);
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ ethtool_cpy(&data, enetc_pm_counters[i].name);
+
break;
}
}
@@ -302,13 +312,16 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ data[o++] = enetc_port_rd64(hw, enetc_pm_counters[i].reg);
}
static void enetc_pause_stats(struct enetc_hw *hw, int mac,
struct ethtool_pause_stats *pause_stats)
{
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+ pause_stats->tx_pause_frames = enetc_port_rd64(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd64(hw, ENETC_PM_RXPF(mac));
}
static void enetc_get_pause_stats(struct net_device *ndev,
@@ -335,31 +348,31 @@ static void enetc_get_pause_stats(struct net_device *ndev,
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_mac_stats *s)
{
- s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
- s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
- s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
- s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
- s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
- s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
- s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
- s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
- s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
- s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
- s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
- s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
- s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
- s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
- s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
- s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
- s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
- s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+ s->FramesTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd64(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd64(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd64(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd64(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd64(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd64(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd64(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd64(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RBCA(mac));
}
static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_ctrl_stats *s)
{
- s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
- s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+ s->MACControlFramesTransmitted = enetc_port_rd64(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd64(hw, ENETC_PM_RCNP(mac));
}
static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
@@ -376,26 +389,26 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
struct ethtool_rmon_stats *s)
{
- s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
- s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
- s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
- s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
-
- s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
- s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
- s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
- s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
- s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
- s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
- s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
-
- s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
- s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
- s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
- s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
- s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
- s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
- s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+ s->undersize_pkts = enetc_port_rd64(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd64(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd64(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd64(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd64(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd64(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd64(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd64(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd64(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd64(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd64(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd64(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd64(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd64(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd64(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd64(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd64(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd64(hw, ENETC_PM_T1523X(mac));
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -467,7 +480,8 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+static int enetc_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
@@ -584,9 +598,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
- case ETHTOOL_GRXFH:
- /* get RSS hash config */
- return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
@@ -639,8 +650,6 @@ static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
- case ETHTOOL_GRXFH:
- return enetc_get_rsshash(rxnfc);
default:
return -EOPNOTSUPP;
}
@@ -1228,6 +1237,7 @@ const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1258,6 +1268,7 @@ const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1284,6 +1295,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 4098f01479bc..73763e8f4879 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -43,6 +43,9 @@
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
+#define ENETC_SICVLANR1 0x90
+#define ENETC_SICVLANR2 0x94
+#define SICVLANR_ETYPE GENMASK(15, 0)
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
@@ -507,7 +510,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
tmp = ioread32(reg + 4);
} while (high != tmp);
- return le64_to_cpu((__le64)high << 32 | low);
+ return (u64)high << 32 | low;
}
#endif
@@ -533,6 +536,7 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd64(hw, off) _enetc_rd_reg64_wa((hw)->port + (off))
#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
(hw)->port + (off), val)
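
Editor's note: the return-statement fix above sits in _enetc_rd_reg64(), which reads a 64-bit counter as two 32-bit halves and re-reads the high word until it is stable, so a carry between the two reads cannot tear the value; the new enetc_port_rd64() wrapper then uses it for the port/MAC counters. A generic sketch of the same guard (illustrative name):

#include <linux/io.h>
#include <linux/types.h>

static u64 read_counter64(void __iomem *reg)
{
	u32 low, high, tmp;

	do {
		high = ioread32(reg + 4);	/* upper half first */
		low  = ioread32(reg);
		tmp  = ioread32(reg + 4);	/* re-check the upper half */
	} while (high != tmp);			/* retry if a carry slipped in */

	return (u64)high << 32 | low;
}
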
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c81f2ea588f2..5c8fdcef759b 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -14,14 +14,14 @@
#define FEC_H
/****************************************************************************/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/bpf.h>
#include <linux/clocksource.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/net_tstamp.h>
#include <linux/pm_qos.h>
-#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <dt-bindings/firmware/imx/rsrc.h>
-#include <linux/firmware/imx/sci.h>
#include <net/xdp.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
@@ -115,7 +115,7 @@
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
@@ -342,7 +342,7 @@ struct bufdesc_ex {
#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
+ * pool. The code may assume these are power of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
@@ -460,7 +460,7 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
-/* Controller supports interrupt coalesc */
+/* Controller supports interrupt coalesce */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
@@ -495,7 +495,7 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_EEE (1 << 20)
-/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC
+/* i.MX8QM ENET IP version add new feature to generate delayed TXC/RXC
* as an alternative option to make sure it works well with various PHYs.
* For the implementation of delayed clock, ENET takes synchronized 250MHz
* clocks to generate 2ns delay.
@@ -614,7 +614,6 @@ struct fec_enet_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 17e9bddb9ddd..1383918f8a3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -22,56 +22,55 @@
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/pm_runtime.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/page_pool/helpers.h>
-#include <net/selftests.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <linux/fec.h>
+#include <linux/filter.h>
+#include <linux/gpio/consumer.h>
#include <linux/icmp.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/regulator/consumer.h>
-#include <linux/if_vlan.h>
+#include <linux/phy.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
-#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/ptrace.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
+#include <net/selftests.h>
+#include <net/tso.h>
#include <soc/imx/cpuidle.h>
-#include <linux/filter.h>
-#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
-
-#include <asm/cacheflush.h>
#include "fec.h"
@@ -131,7 +130,7 @@ static const struct fec_devinfo fec_mvf600_info = {
FEC_QUIRK_HAS_MDIO_C45,
};
-static const struct fec_devinfo fec_imx6x_info = {
+static const struct fec_devinfo fec_imx6sx_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
@@ -196,7 +195,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
@@ -276,16 +275,19 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ECR_MAGICEN BIT(2)
#define FEC_ECR_SLEEP BIT(3)
#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_SPEED BIT(5)
#define FEC_ECR_BYTESWP BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP BIT(0)
-#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_DRT BIT(1)
#define FEC_RCR_MII BIT(2)
#define FEC_RCR_PROMISC BIT(3)
#define FEC_RCR_BC_REJ BIT(4)
#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RGMII BIT(6)
#define FEC_RCR_RMII BIT(8)
#define FEC_RCR_10BASET BIT(9)
+#define FEC_RCR_NLC BIT(30)
/* TX WMARK bits */
#define FEC_TXWMRK_STRFWD BIT(8)
@@ -1043,7 +1045,9 @@ static void fec_enet_bd_init(struct net_device *dev)
struct page *page = txq->tx_buf[i].buf_p;
if (page)
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0,
+ false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -1121,6 +1125,17 @@ static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
}
}
+static void fec_set_hw_mac_addr(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1130,8 +1145,7 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 temp_mac[2];
- u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII;
u32 ecntl = FEC_ECR_ETHEREN;
if (fep->bufdesc_ex)
@@ -1143,11 +1157,7 @@ fec_restart(struct net_device *ndev)
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
/* Clear any outstanding interrupt, except MDIO. */
writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
@@ -1162,7 +1172,7 @@ fec_restart(struct net_device *ndev)
writel(0x04, fep->hwp + FEC_X_CNTRL);
} else {
/* No Rcv on Xmit */
- rcntl |= 0x02;
+ rcntl |= FEC_RCR_DRT;
writel(0x0, fep->hwp + FEC_X_CNTRL);
}
@@ -1191,14 +1201,11 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
- rcntl |= 0x40000000 | 0x00000020;
+ rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rcntl |= (1 << 6);
+ if (phy_interface_mode_is_rgmii(fep->phy_interface))
+ rcntl |= FEC_RCR_RGMII;
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= FEC_RCR_RMII;
else
@@ -1207,7 +1214,7 @@ fec_restart(struct net_device *ndev)
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
- ecntl |= (1 << 5);
+ ecntl |= FEC_ECR_SPEED;
else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~FEC_RCR_10BASET;
else
@@ -1581,7 +1588,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
xdp_return_frame_rx_napi(xdpf);
} else { /* recycle pages of XDP_TX frames */
/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(page->pp, page, 0, true);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
}
txq->tx_buf[index].buf_p = NULL;
@@ -1706,13 +1714,29 @@ xdp_err:
return ret;
}
+static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
+{
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
+ const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ /* Push and remove the vlan tag */
+
+ memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
static int
-fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_enet_priv_rx_q *rxq;
@@ -1720,11 +1744,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
- __u8 *data;
int pkt_received = 0;
struct bufdesc_ex *ebdp = NULL;
- bool vlan_packet_rcvd = false;
- u16 vlan_tag;
int index = 0;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
@@ -1843,10 +1864,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_mark_for_recycle(skb);
if (unlikely(need_swap)) {
+ u8 *data;
+
data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
}
- data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1854,20 +1876,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
- vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-
- vlan_packet_rcvd = true;
-
- memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
+ if (fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
+ fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1886,12 +1897,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
}
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
-
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
@@ -1939,7 +1944,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, budget - done, i);
+ done += fec_enet_rx_queue(ndev, i, budget - done);
return done;
}
@@ -3124,27 +3129,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int rx_itr, tx_itr;
+ u32 rx_itr = 0, tx_itr = 0;
+ int rx_ictt, tx_ictt;
- /* Must be greater than zero to avoid unpredictable behavior */
- if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
- !fep->tx_time_itr || !fep->tx_pkts_itr)
- return;
-
- /* Select enet system clock as Interrupt Coalescing
- * timer Clock Source
- */
- rx_itr = FEC_ITR_CLK_SEL;
- tx_itr = FEC_ITR_CLK_SEL;
+ rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- /* set ICFT and ICTT */
- rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
- rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
- tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
- tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+ if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(rx_ictt);
+ }
- rx_itr |= FEC_ITR_EN;
- tx_itr |= FEC_ITR_EN;
+ if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(tx_ictt);
+ }
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
@@ -3348,7 +3351,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
} else {
struct page *page = txq->tx_buf[i].buf_p;
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -3699,7 +3703,6 @@ static void set_multicast_list(struct net_device *ndev)
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (addr) {
@@ -3716,11 +3719,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (!netif_running(ndev))
return 0;
- writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
- (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
- fep->hwp + FEC_ADDR_LOW);
- writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
+
return 0;
}
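
The fec_enet_rx_vlan() helper added above performs a fixed sequence: read the TCI from the tag embedded in the frame, slide the destination and source MAC addresses forward by VLAN_HLEN so the tag vanishes from the packet data, advance the data pointer with skb_pull(), and hand the tag to the stack via __vlan_hwaccel_put_tag(). The following stand-alone sketch applies the same buffer manipulation to a raw Ethernet frame in user-space C; the constants are written out by hand rather than taken from kernel headers, and it is an illustration of the technique, not driver code.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define ETH_ALEN   6
	#define VLAN_HLEN  4	/* 2-byte TPID + 2-byte TCI */

	/* Strip an 802.1Q tag in place.  On success the TCI (host order) is
	 * stored in *tci, the MAC addresses are moved forward over the tag
	 * (the driver's memmove + skb_pull trick) and a pointer to the new
	 * start of the untagged frame is returned, with *new_len updated.
	 * An untagged frame is returned unchanged. */
	static uint8_t *strip_vlan_tag(uint8_t *frame, size_t len,
				       size_t *new_len, uint16_t *tci)
	{
		uint16_t tpid;

		*new_len = len;
		if (len < 2 * ETH_ALEN + VLAN_HLEN)
			return frame;

		memcpy(&tpid, frame + 2 * ETH_ALEN, sizeof(tpid));
		if (ntohs(tpid) != 0x8100)	/* not 802.1Q tagged */
			return frame;

		memcpy(tci, frame + 2 * ETH_ALEN + 2, sizeof(*tci));
		*tci = ntohs(*tci);

		/* Slide dst+src MAC forward over the 4-byte tag. */
		memmove(frame + VLAN_HLEN, frame, 2 * ETH_ALEN);
		*new_len = len - VLAN_HLEN;
		return frame + VLAN_HLEN;	/* like skb_pull(): data starts here */
	}

	int main(void)
	{
		uint8_t frame[64] = {
			1, 2, 3, 4, 5, 6,		/* dst MAC */
			7, 8, 9, 10, 11, 12,		/* src MAC */
			0x81, 0x00, 0x00, 0x64,		/* 802.1Q tag, VLAN 100 */
			0x08, 0x00,			/* EtherType IPv4 */
		};
		size_t new_len;
		uint16_t tci = 0;
		uint8_t *data = strip_vlan_tag(frame, sizeof(frame), &new_len, &tci);

		printf("tci=0x%04x ethertype byte=0x%02x len=%zu\n",
		       tci, data[2 * ETH_ALEN], new_len);
		return 0;
	}
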
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 2bfaf14f65c8..3fc29afc9854 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -619,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
- /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 876d90832596..fa88b47d526c 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -7,30 +7,30 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/fec.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include "fec.h"
@@ -96,7 +96,7 @@
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static u64 fec_ptp_read(const struct cyclecounter *cc)
+static u64 fec_ptp_read(struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
@@ -117,7 +117,7 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
- * This function enble the PPS ouput on the timer channel.
+ * This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
@@ -172,7 +172,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* very close to the second point, which means NSEC_PER_SEC
* - ts.tv_nsec is close to be zero(For example 20ns); Since the timer
* is still running when we calculate the first compare event, it is
- * possible that the remaining nanoseonds run out before the compare
+ * possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into TCCR register. To avoid
* this possibility, we will set the compare event to be the next
* of next second. The current setting is 31-bit timer and wrap
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 3925441143fa..0291093f2e4e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1225,7 +1225,7 @@ int memac_initialization(struct mac_device *mac_dev,
* be careful and not enable this if we are using MII or RGMII, since
* those configurations modes don't use in-band autonegotiation.
*/
- if (!of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_present(mac_node, "managed") &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
mac_dev->phylink_config.default_an_inband = true;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 56d2f79fb7e3..577f9b1780ad 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -491,8 +491,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
err = of_mdiobus_register(new_bus, np);
if (err) {
- dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
- new_bus->name);
+ dev_err_probe(&pdev->dev, err, "cannot register %s as MDIO bus\n",
+ new_bus->name);
goto error;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index bcbcad613512..7c0f049f0938 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -97,6 +97,7 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/property.h>
#include "gianfar.h"
@@ -571,18 +572,6 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_group_count(struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- if (of_node_name_eq(child, "queue-group"))
- num++;
-
- return num;
-}
-
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
@@ -654,8 +643,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = gfar_of_group_count(np);
+ unsigned int num_grps;
+ num_grps = device_get_named_child_node_count(&ofdev->dev,
+ "queue-group");
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
num_grps);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 781d92e703cb..28f53cf2a174 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -781,14 +781,26 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd)
+static int gfar_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ ret = 0;
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
}
static int gfar_check_filer_hardware(struct gfar_private *priv)
@@ -1398,9 +1410,6 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = gfar_set_hash_opts(priv, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
cmd->fs.ring_cookie >= priv->num_rx_queues) ||
@@ -1508,6 +1517,7 @@ const struct ethtool_ops gfar_ethtool_ops = {
#endif
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
+ .set_rxfh_fields = gfar_set_rxfh_fields,
.get_ts_info = gfar_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index 564862a57124..14c9431e15e5 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
This driver supports Google Virtual NIC (gVNIC)"
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index 4520f1c07a63..e0ec227a50f7 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,5 +1,7 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
+gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
gve_buffer_mgmt_dqo.o
+
+gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 2fab38c8ee78..bceaf9b05cb4 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -11,7 +11,9 @@
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -188,6 +190,9 @@ struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
struct gve_rx_slot_page_info page_info;
+ /* XSK buffer */
+ struct xdp_buff *xsk_buff;
+
/* The DMA address corresponding to `page_info`. */
dma_addr_t addr;
@@ -329,7 +334,6 @@ struct gve_rx_ring {
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
- struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
@@ -398,10 +402,24 @@ enum gve_packet_state {
GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
/* No valid completion received within the specified timeout. */
GVE_PACKET_STATE_TIMED_OUT_COMPL,
+ /* XSK pending packet has received a packet/reinjection completion, or
+ * has timed out. At this point, the pending packet can be counted by
+ * xsk_tx_complete and freed.
+ */
+ GVE_PACKET_STATE_XSK_COMPLETE,
+};
+
+enum gve_tx_pending_packet_dqo_type {
+ GVE_TX_PENDING_PACKET_DQO_SKB,
+ GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
+ GVE_TX_PENDING_PACKET_DQO_XSK,
};
struct gve_tx_pending_packet_dqo {
- struct sk_buff *skb; /* skb for this packet */
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
/* 0th element corresponds to the linear portion of `skb`, should be
* unmapped with `dma_unmap_single`.
@@ -431,7 +449,10 @@ struct gve_tx_pending_packet_dqo {
/* Identifies the current state of the packet as defined in
* `enum gve_packet_state`.
*/
- u8 state;
+ u8 state : 3;
+
+ /* gve_tx_pending_packet_dqo_type */
+ u8 type : 2;
/* If packet is an outstanding miss completion, then the packet is
* freed if the corresponding re-injection completion is not received
@@ -453,6 +474,9 @@ struct gve_tx_ring {
/* DQO fields. */
struct {
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
+
/* Linked list of gve_tx_pending_packet_dqo. Index into
* pending_packets, or -1 if empty.
*
@@ -497,6 +521,8 @@ struct gve_tx_ring {
/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
u32 free_tx_qpl_buf_cnt;
};
+
+ atomic_t xsk_reorder_queue_tail;
} dqo_tx;
};
@@ -530,6 +556,9 @@ struct gve_tx_ring {
/* Last TX ring index fetched by HW */
atomic_t hw_tx_head;
+ u16 xsk_reorder_queue_head;
+ u16 xsk_reorder_queue_tail;
+
/* List to track pending packets which received a miss
* completion but not a corresponding reinjection.
*/
@@ -583,6 +612,8 @@ struct gve_tx_ring {
struct gve_tx_pending_packet_dqo *pending_packets;
s16 num_pending_packets;
+ u16 *xsk_reorder_queue;
+
u32 complq_mask; /* complq size is complq_mask + 1 */
/* QPL fields */
@@ -750,6 +781,12 @@ struct gve_rss_config {
u32 *hash_lut;
};
+struct gve_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct gve_priv *priv;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -781,7 +818,9 @@ struct gve_priv {
struct gve_tx_queue_config tx_cfg;
struct gve_rx_queue_config rx_cfg;
- u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
+ unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
+ u32 num_ntfy_blks; /* split between TX and RX so must be even */
+ int numa_node;
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
__be32 __iomem *db_bar2; /* "array" of doorbells */
@@ -813,6 +852,7 @@ struct gve_priv {
u32 adminq_set_driver_parameter_cnt;
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
+ u32 adminq_report_nic_timestamp_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
@@ -870,6 +910,14 @@ struct gve_priv {
u16 rss_lut_size;
bool cache_rss_config;
struct gve_rss_config rss_config;
+
+ /* True if the device supports reading the nic clock */
+ bool nic_timestamp_supported;
+ struct gve_ptp *ptp;
+ struct kernel_hwtstamp_config ts_config;
+ struct gve_nic_ts_report *nic_ts_report;
+ dma_addr_t nic_ts_report_bus;
+ u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};
enum gve_service_task_flags_bit {
@@ -1138,6 +1186,7 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
{
switch (priv->queue_format) {
case GVE_GQI_QPL_FORMAT:
+ case GVE_DQO_RDA_FORMAT:
return true;
default:
return false;
@@ -1161,11 +1210,15 @@ void gve_free_queue_page_list(struct gve_priv *priv,
u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags);
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
@@ -1249,6 +1302,24 @@ int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* RSS config */
int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
+/* PTP and timestamping */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int gve_clock_nic_ts_read(struct gve_priv *priv);
+int gve_init_clock(struct gve_priv *priv);
+void gve_teardown_clock(struct gve_priv *priv);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gve_init_clock(struct gve_priv *priv)
+{
+ return 0;
+}
+
+static inline void gve_teardown_clock(struct gve_priv *priv) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 3e8fc33cc11f..4f33d094a2ef 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -46,6 +46,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -225,6 +226,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"RSS config");
*dev_op_rss_config = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_NIC_TIMESTAMP:
+ if (option_length < sizeof(**dev_op_nic_timestamp) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Nic Timestamp",
+ (int)sizeof(**dev_op_nic_timestamp),
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_nic_timestamp))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Nic Timestamp");
+ *dev_op_nic_timestamp = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -246,6 +264,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -269,6 +288,7 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
dev_op_flow_steering, dev_op_rss_config,
+ dev_op_nic_timestamp,
dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -306,6 +326,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_set_driver_parameter_cnt = 0;
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
+ priv->adminq_report_nic_timestamp_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
@@ -442,6 +463,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
int tail, head;
int i;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
@@ -467,9 +490,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
@@ -477,6 +497,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
u32 opcode;
u32 tail;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
@@ -544,6 +566,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++;
break;
+ case GVE_ADMINQ_REPORT_NIC_TIMESTAMP:
+ priv->adminq_report_nic_timestamp_cnt++;
+ break;
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
@@ -564,6 +589,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
+ return -EINVAL;
}
return 0;
@@ -625,7 +651,7 @@ static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
/* The device specifies that the management vector can either be the first irq
* or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
- * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then
+ * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
* the management vector is first.
*
* gve arranges the msix vectors so that the management vector is last.
@@ -709,13 +735,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
@@ -788,13 +820,19 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_create_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
@@ -820,13 +858,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
@@ -861,13 +905,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_destroy_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_set_default_desc_cnt(struct gve_priv *priv,
@@ -904,6 +954,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_flow_steering,
const struct gve_device_option_rss_config
*dev_op_rss_config,
+ const struct gve_device_option_nic_timestamp
+ *dev_op_nic_timestamp,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -980,10 +1032,15 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"RSS device option enabled with key size of %u, lut size of %u.\n",
priv->rss_key_size, priv->rss_lut_size);
}
+
+ if (dev_op_nic_timestamp &&
+ (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK))
+ priv->nic_timestamp_supported = true;
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL;
struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
@@ -1024,6 +1081,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_buffer_sizes,
&dev_op_flow_steering,
&dev_op_rss_config,
+ &dev_op_nic_timestamp,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1088,7 +1146,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_rss_config, dev_op_modify_ring);
+ dev_op_rss_config, dev_op_nic_timestamp,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1200,6 +1259,22 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
return err;
}
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP);
+ cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) {
+ .nic_ts_report_len =
+ cpu_to_be64(sizeof(struct gve_nic_ts_report)),
+ .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
struct gve_ptype_lut *ptype_lut)
{
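
The NIC_TIMESTAMP case added to gve_parse_device_option() follows the driver's usual TLV validation pattern: an option is rejected when option_length is smaller than the structure the driver knows about or when required_features_mask differs from the expected value (0x0 here), and a too-large option only triggers a warning before the known prefix is used. A minimal stand-alone model of that pattern is sketched below; the opt_hdr structure and host-order fields are illustrative assumptions, not the driver's gve_device_option, and endianness conversion is omitted.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative TLV option header: id, payload length, feature mask. */
	struct opt_hdr {
		uint16_t id;
		uint16_t len;		/* payload length following the header */
		uint32_t req_feat_mask;
	};

	static const void *accept_option(const struct opt_hdr *hdr,
					 uint16_t expected_len,
					 uint32_t expected_mask)
	{
		if (hdr->len < expected_len || hdr->req_feat_mask != expected_mask) {
			fprintf(stderr, "option %u: bad len %u or mask 0x%x, skipping\n",
				hdr->id, hdr->len, hdr->req_feat_mask);
			return NULL;
		}
		if (hdr->len > expected_len)
			fprintf(stderr, "option %u: larger than expected, using known prefix\n",
				hdr->id);
		return (const void *)(hdr + 1);	/* payload follows the header */
	}

	int main(void)
	{
		/* Option id 0xd with a single 32-bit feature-mask payload,
		 * mirroring the shape of the NIC timestamp option. */
		struct { struct opt_hdr hdr; uint32_t supported_features; } opt = {
			{ .id = 0xd, .len = sizeof(uint32_t), .req_feat_mask = 0x0 },
			0x1,
		};

		return accept_option(&opt.hdr, sizeof(uint32_t), 0x0) ? 0 : 1;
	}
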
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 228217458275..22a74b6aa17e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -27,6 +27,7 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11,
GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
@@ -174,6 +175,12 @@ struct gve_device_option_rss_config {
static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+struct gve_device_option_nic_timestamp {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -192,6 +199,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd,
GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
@@ -206,6 +214,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0,
};
enum gve_sup_feature_mask {
@@ -214,6 +223,7 @@ enum gve_sup_feature_mask {
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
+ GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -392,6 +402,21 @@ struct gve_adminq_report_link_speed {
static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+struct gve_adminq_report_nic_ts {
+ __be64 nic_ts_report_len;
+ __be64 nic_ts_report_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16);
+
+struct gve_nic_ts_report {
+ __be64 nic_timestamp; /* NIC clock in nanoseconds */
+ __be64 reserved1;
+ __be64 reserved2;
+ __be64 reserved3;
+ __be64 reserved4;
+};
+
struct stats {
__be32 stat_name;
__be32 queue_id;
@@ -451,7 +476,7 @@ struct gve_ptype_entry {
};
struct gve_ptype_map {
- struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+ struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */
};
struct gve_adminq_get_ptype_map {
@@ -585,6 +610,7 @@ union gve_adminq_command {
struct gve_adminq_query_flow_rules query_flow_rules;
struct gve_adminq_configure_rss configure_rss;
struct gve_adminq_query_rss query_rss;
+ struct gve_adminq_report_nic_ts report_nic_ts;
struct gve_adminq_extended_command extended_command;
};
};
@@ -624,6 +650,8 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index a71883e1d920..8f5021e59e0a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google, Inc.
*/
+#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_utils.h"
@@ -29,6 +30,10 @@ struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
/* Point buf_state to itself to mark it as allocated */
buf_state->next = buffer_id;
+ /* Clear the buffer pointers */
+ buf_state->page_info.page = NULL;
+ buf_state->xsk_buff = NULL;
+
return buf_state;
}
@@ -246,6 +251,7 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+ .nid = priv->numa_node,
.dev = &priv->pdev->dev,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
@@ -285,7 +291,24 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
struct gve_rx_buf_state_dqo *buf_state;
- if (rx->dqo.page_pool) {
+ if (rx->xsk_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool);
+ if (unlikely(!buf_state->xsk_buff)) {
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ /* Allocated xsk buffer. Clear wakeup in case it was set. */
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr =
+ cpu_to_le64(xsk_buff_xdp_get_dma(buf_state->xsk_buff));
+ return 0;
+ } else if (rx->dqo.page_pool) {
buf_state = gve_alloc_buf_state(rx);
if (WARN_ON_ONCE(!buf_state))
return -ENOMEM;
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index f79cd0591110..d17da841b5a0 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -247,7 +247,8 @@ struct gve_rx_compl_desc_dqo {
};
__le32 hash;
__le32 reserved6;
- __le64 reserved7;
+ __le32 reserved7;
+ __le32 ts; /* timestamp in nanosecs */
} __packed;
static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index e83773fb891f..6eb442096e02 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -37,6 +37,8 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+bool gve_xdp_poll_dqo(struct gve_notify_block *block);
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
@@ -60,6 +62,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);
static inline void
gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 3c1da0cf3f61..d0a223250845 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -76,7 +76,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
- "adminq_query_rss_cnt",
+ "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -456,6 +456,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_cfg_flow_rule_cnt;
data[i++] = priv->adminq_cfg_rss_cnt;
data[i++] = priv->adminq_query_rss_cnt;
+ data[i++] = priv->adminq_report_nic_timestamp_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -667,7 +668,7 @@ static u32 gve_get_priv_flags(struct net_device *netdev)
struct gve_priv *priv = netdev_priv(netdev);
u32 ret_flags = 0;
- /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */
+ /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
if (priv->ethtool_flags & BIT(0))
ret_flags |= BIT(0);
return ret_flags;
@@ -798,9 +799,6 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = gve_del_flow_rule(priv, cmd);
break;
- case ETHTOOL_SRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -835,9 +833,6 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
case ETHTOOL_GRXCLSRLALL:
err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -928,6 +923,27 @@ static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
return 0;
}
+static int gve_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (priv->nic_timestamp_supported) {
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ if (priv->ptp)
+ info->phc_index = ptp_clock_index(priv->ptp->clock);
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -956,5 +972,5 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
.get_link_ksettings = gve_get_link_ksettings,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gve_get_ts_info,
};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index dc35a23ec47f..1f411d7c4373 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google LLC
*/
+#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
@@ -414,14 +415,24 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- if (block->tx)
- reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ else
+ reschedule |= gve_xdp_poll_dqo(block);
+ }
if (!budget)
return 0;
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Setup re-poll based on if
+ * either datapath has more work to do.
+ */
+ if (priv->xdp_prog)
+ reschedule |= gve_xsk_tx_poll_dqo(block, budget);
reschedule |= work_done == budget;
}
@@ -457,10 +468,19 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
return work_done;
}
+static const struct cpumask *gve_get_node_mask(struct gve_priv *priv)
+{
+ if (priv->numa_node == NUMA_NO_NODE)
+ return cpu_all_mask;
+ else
+ return cpumask_of_node(priv->numa_node);
+}
+
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- unsigned int active_cpus;
+ const struct cpumask *node_mask;
+ unsigned int cur_cpu;
int vecs_enabled;
int i, j;
int err;
@@ -499,8 +519,6 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
}
- /* Half the notification blocks go to TX and half to RX */
- active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
@@ -529,6 +547,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
/* Setup the other blocks - the first n-1 vectors */
+ node_mask = gve_get_node_mask(priv);
+ cur_cpu = cpumask_first(node_mask);
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
@@ -545,9 +565,17 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
goto abort_with_some_ntfy_blocks;
}
block->irq = priv->msix_vectors[msix_idx].vector;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- get_cpu_mask(i % active_cpus));
+ irq_set_affinity_and_hint(block->irq,
+ cpumask_of(cur_cpu));
block->irq_db_index = &priv->irq_db_indices[i].index;
+
+ cur_cpu = cpumask_next(cur_cpu, node_mask);
+ /* Wrap once CPUs in the node have been exhausted, or when
+ * starting RX queue affinities. TX and RX queues of the same
+ * index share affinity.
+ */
+ if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues)
+ cur_cpu = cpumask_first(node_mask);
}
return 0;
abort_with_some_ntfy_blocks:
@@ -619,9 +647,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_counter_array(priv);
if (err)
goto abort_with_rss_config_cache;
- err = gve_alloc_notify_blocks(priv);
+ err = gve_init_clock(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_notify_blocks(priv);
+ if (err)
+ goto abort_with_clock;
err = gve_alloc_stats_report(priv);
if (err)
goto abort_with_ntfy_blocks;
@@ -674,6 +705,8 @@ abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
+abort_with_clock:
+ gve_teardown_clock(priv);
abort_with_counter:
gve_free_counter_array(priv);
abort_with_rss_config_cache:
@@ -722,6 +755,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
+ gve_teardown_clock(priv);
gve_clear_device_resources_ok(priv);
}
@@ -1030,7 +1064,7 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(gfp_flags);
+ *page = alloc_pages_node(priv->numa_node, gfp_flags, 0);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -1131,18 +1165,84 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
+static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
+{
+ struct gve_rx_ring *rx;
+
+ if (!priv->rx)
+ return;
+
+ rx = &priv->rx[qid];
+ rx->xsk_pool = NULL;
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
+
+ if (!priv->tx)
+ return;
+ priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
+}
+
+static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
+ struct xsk_buff_pool *pool, u16 qid)
+{
+ struct gve_rx_ring *rx;
+ u16 tx_qid;
+ int err;
+
+ rx = &priv->rx[qid];
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, pool);
+ if (err) {
+ gve_unreg_xsk_pool(priv, qid);
+ return err;
+ }
+
+ rx->xsk_pool = pool;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
+
+ return 0;
+}
+
+static void gve_unreg_xdp_info(struct gve_priv *priv)
+{
+ int i;
+
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
+ return;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
+
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+
+ gve_unreg_xsk_pool(priv, i);
+ }
+}
+
+static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
+{
+ if (!test_bit(qid, priv->xsk_pools))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, qid);
+}
+
static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
{
struct napi_struct *napi;
struct gve_rx_ring *rx;
int err = 0;
- int i, j;
- u32 tx_qid;
+ int i;
if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct xsk_buff_pool *xsk_pool;
+
rx = &priv->rx[i];
napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
@@ -1150,7 +1250,11 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- if (gve_is_qpl(priv))
+
+ xsk_pool = gve_get_xsk_pool(priv, i);
+ if (xsk_pool)
+ err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+ else if (gve_is_qpl(priv))
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
@@ -1160,60 +1264,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
rx->dqo.page_pool);
if (err)
goto err;
- rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
- if (rx->xsk_pool) {
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
- napi->napi_id);
- if (err)
- goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
- if (err)
- goto err;
- xsk_pool_set_rxq_info(rx->xsk_pool,
- &rx->xsk_rxq);
- }
- }
-
- for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
return 0;
err:
- for (j = i; j >= 0; j--) {
- rx = &priv->rx[j];
- if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- }
+ gve_unreg_xdp_info(priv);
return err;
}
-static void gve_unreg_xdp_info(struct gve_priv *priv)
-{
- int i, tx_qid;
-
- if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
- return;
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- struct gve_rx_ring *rx = &priv->rx[i];
-
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (rx->xsk_pool) {
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- rx->xsk_pool = NULL;
- }
- }
-
- for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = NULL;
- }
-}
static void gve_drain_page_cache(struct gve_priv *priv)
{
@@ -1510,14 +1568,24 @@ out:
return err;
}
+static int gve_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ return gve_xdp_xmit_gqi(dev, n, frames, flags);
+ else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+ return gve_xdp_xmit_dqo(dev, n, frames, flags);
+
+ return -EOPNOTSUPP;
+}
+
static int gve_xsk_pool_enable(struct net_device *dev,
struct xsk_buff_pool *pool,
u16 qid)
{
struct gve_priv *priv = netdev_priv(dev);
- struct napi_struct *napi;
- struct gve_rx_ring *rx;
- int tx_qid;
int err;
if (qid >= priv->rx_cfg.num_queues) {
@@ -1535,34 +1603,31 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
+ set_bit(qid, priv->xsk_pools);
+
/* If XDP prog is not installed or interface is down, return. */
if (!priv->xdp_prog || !netif_running(dev))
return 0;
- rx = &priv->rx[qid];
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
- if (err)
- goto err;
-
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
+ err = gve_reg_xsk_pool(priv, dev, pool, qid);
if (err)
- goto err;
-
- xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
- rx->xsk_pool = pool;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- priv->tx[tx_qid].xsk_pool = pool;
+ goto err_xsk_pool_dma_mapped;
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv)) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto err_xsk_pool_registered;
+ }
return 0;
-err:
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
+err_xsk_pool_registered:
+ gve_unreg_xsk_pool(priv, qid);
+err_xsk_pool_dma_mapped:
+ clear_bit(qid, priv->xsk_pools);
xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
return err;
}
@@ -1574,18 +1639,28 @@ static int gve_xsk_pool_disable(struct net_device *dev,
struct napi_struct *napi_tx;
struct xsk_buff_pool *pool;
int tx_qid;
+ int err;
- pool = xsk_get_pool_from_qid(dev, qid);
- if (!pool)
- return -EINVAL;
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed or interface is down, unmap DMA and
- * return.
- */
- if (!priv->xdp_prog || !netif_running(dev))
- goto done;
+ clear_bit(qid, priv->xsk_pools);
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (pool)
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
+
+ if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
+ return 0;
+
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv) && priv->xdp_prog) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ return err;
+ }
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
@@ -1594,22 +1669,19 @@ static int gve_xsk_pool_disable(struct net_device *dev,
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ gve_unreg_xsk_pool(priv, qid);
smp_mb(); /* Make sure it is visible to the workers on datapath */
napi_enable(napi_rx);
- if (gve_rx_work_pending(&priv->rx[qid]))
- napi_schedule(napi_rx);
-
napi_enable(napi_tx);
- if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
- napi_schedule(napi_tx);
+ if (gve_is_gqi(priv)) {
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+ }
-done:
- xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return 0;
}
@@ -1645,9 +1717,8 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
- netdev_warn(dev, "XDP is not supported in mode %d.\n",
- priv->queue_format);
+ if (priv->header_split_enabled) {
+ netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n");
return -EOPNOTSUPP;
}
@@ -1727,7 +1798,7 @@ int gve_adjust_config(struct gve_priv *priv,
{
int err;
- /* Allocate resources for the new confiugration */
+ /* Allocate resources for the new configuration */
err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -1917,49 +1988,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv)
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
+ unsigned int txqueue)
{
- struct gve_notify_block *block;
- struct gve_tx_ring *tx = NULL;
- struct gve_priv *priv;
- u32 last_nic_done;
- u32 current_time;
u32 ntfy_idx;
- netdev_info(dev, "Timeout on tx queue, %d", txqueue);
- priv = netdev_priv(dev);
if (txqueue > priv->tx_cfg.num_queues)
- goto reset;
+ return NULL;
ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
if (ntfy_idx >= priv->num_ntfy_blks)
- goto reset;
+ return NULL;
+
+ return &priv->ntfy_blocks[ntfy_idx];
+}
- block = &priv->ntfy_blocks[ntfy_idx];
- tx = block->tx;
+static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
+ unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ u32 current_time;
+
+ block = gve_get_tx_notify_block(priv, txqueue);
+
+ if (!block)
+ return false;
current_time = jiffies_to_msecs(jiffies);
- if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
- goto reset;
+ if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ return false;
- /* Check to see if there are missed completions, which will allow us to
- * kick the queue.
- */
- last_nic_done = gve_tx_load_event_counter(priv, tx);
- if (last_nic_done - tx->done) {
- netdev_info(dev, "Kicking queue %d", txqueue);
- iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
- napi_schedule(&block->napi);
- tx->last_kick_msec = current_time;
- goto out;
- } // Else reset.
+ netdev_info(priv->dev, "Kicking queue %d", txqueue);
+ napi_schedule(&block->napi);
+ block->tx->last_kick_msec = current_time;
+ return true;
+}
-reset:
- gve_schedule_reset(priv);
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ struct gve_priv *priv;
-out:
- if (tx)
- tx->queue_timeout++;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+
+ if (!gve_tx_timeout_try_q_kick(priv, txqueue))
+ gve_schedule_reset(priv);
+
+ block = gve_get_tx_notify_block(priv, txqueue);
+ if (block)
+ block->tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
@@ -1971,10 +2049,13 @@ u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
return GVE_DEFAULT_RX_BUFFER_SIZE;
}
-/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
+/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
+ * header split is not allowed.
+ */
bool gve_header_split_supported(const struct gve_priv *priv)
{
- return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+ return priv->header_buf_size &&
+ priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
@@ -2023,6 +2104,12 @@ static int gve_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
+ if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+ netdev_warn(netdev,
+ "XDP is not supported when LRO is on.\n");
+ err = -EOPNOTSUPP;
+ goto revert_features;
+ }
if (netif_running(netdev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
@@ -2042,6 +2129,46 @@ revert_features:
return err;
}
+static int gve_get_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ *kernel_config = priv->ts_config;
+ return 0;
+}
+
+static int gve_set_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (kernel_config->tx_type != HWTSTAMP_TX_OFF) {
+ NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported");
+ return -ERANGE;
+ }
+
+ if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (!priv->nic_ts_report) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "RX timestamping is not supported");
+ kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -EOPNOTSUPP;
+ }
+
+ kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ gve_clock_nic_ts_read(priv);
+ ptp_schedule_worker(priv->ptp->clock, 0);
+ } else {
+ ptp_cancel_worker_sync(priv->ptp->clock);
+ }
+
+ priv->ts_config.rx_filter = kernel_config->rx_filter;
+
+ return 0;
+}
+
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
.ndo_features_check = gve_features_check,
@@ -2053,6 +2180,8 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_bpf = gve_xdp,
.ndo_xdp_xmit = gve_xdp_xmit,
.ndo_xsk_wakeup = gve_xsk_wakeup,
+ .ndo_hwtstamp_get = gve_get_ts_config,
+ .ndo_hwtstamp_set = gve_set_ts_config,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -2182,6 +2311,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
xdp_features = 0;
}
@@ -2236,7 +2369,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
- /* Big TCP is only supported on DQ*/
+ /* Big TCP is only supported on DQO */
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
@@ -2246,6 +2379,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
*/
priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
priv->mgmt_msix_idx = priv->num_ntfy_blks;
+ priv->numa_node = dev_to_node(&priv->pdev->dev);
priv->tx_cfg.max_queues =
min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
@@ -2272,11 +2406,26 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
}
+ priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
setup_device:
+ priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
+ if (!priv->xsk_pools) {
+ err = -ENOMEM;
+ goto err;
+ }
+
gve_set_netdev_xdp_features(priv);
err = gve_setup_device_resources(priv);
- if (!err)
- return 0;
+ if (err)
+ goto err_free_xsk_bitmap;
+
+ return 0;
+
+err_free_xsk_bitmap:
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
err:
gve_adminq_free(&priv->pdev->dev, priv);
return err;
@@ -2286,6 +2435,8 @@ static void gve_teardown_priv_resources(struct gve_priv *priv)
{
gve_teardown_device_resources(priv);
gve_adminq_free(&priv->pdev->dev, priv);
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
}
static void gve_trigger_reset(struct gve_priv *priv)
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
new file mode 100644
index 000000000000..e96247c9d68d
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2025 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+/* Interval to schedule a nic timestamp calibration, 250ms. */
+#define GVE_NIC_TS_SYNC_INTERVAL_MS 250
+
+/* Read the nic timestamp from hardware via the admin queue. */
+int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ u64 nic_raw;
+ int err;
+
+ err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus);
+ if (err)
+ return err;
+
+ nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp);
+ WRITE_ONCE(priv->last_sync_nic_counter, nic_raw);
+
+ return 0;
+}
+
+static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
+ struct gve_priv *priv = ptp->priv;
+ int err;
+
+ if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv))
+ goto out;
+
+ err = gve_clock_nic_ts_read(priv);
+ if (err && net_ratelimit())
+ dev_err(&priv->pdev->dev,
+ "%s read err %d\n", __func__, err);
+
+out:
+ return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS);
+}
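A non-negative return from .do_aux_work is the delay, in jiffies, before the PTP core reschedules the worker, so this callback keeps the calibration running every 250 ms once it has been started. A hedged sketch of how such a worker is typically started and stopped; gve's actual call sites are outside this hunk, though ptp_cancel_worker_sync() does appear in gve_set_ts_config above:

/* Illustrative only; these function names are hypothetical. */
static void example_start_nic_ts_sync(struct gve_priv *priv)
{
	if (priv->ptp && priv->ptp->clock)
		ptp_schedule_worker(priv->ptp->clock, 0);	/* run immediately */
}

static void example_stop_nic_ts_sync(struct gve_priv *priv)
{
	if (priv->ptp && priv->ptp->clock)
		ptp_cancel_worker_sync(priv->ptp->clock);
}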
+
+static const struct ptp_clock_info gve_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "gve clock",
+ .do_aux_work = gve_ptp_do_aux_work,
+};
+
+static int gve_ptp_init(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp;
+ int err;
+
+ if (!priv->nic_timestamp_supported) {
+ dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
+ if (!priv->ptp)
+ return -ENOMEM;
+
+ ptp = priv->ptp;
+ ptp->info = gve_ptp_caps;
+ ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev);
+
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&priv->pdev->dev, "PTP clock registration failed\n");
+ err = PTR_ERR(ptp->clock);
+ goto free_ptp;
+ }
+
+ ptp->priv = priv;
+ return 0;
+
+free_ptp:
+ kfree(ptp);
+ priv->ptp = NULL;
+ return err;
+}
+
+static void gve_ptp_release(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp = priv->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->clock)
+ ptp_clock_unregister(ptp->clock);
+
+ kfree(ptp);
+ priv->ptp = NULL;
+}
+
+int gve_init_clock(struct gve_priv *priv)
+{
+ int err;
+
+ if (!priv->nic_timestamp_supported)
+ return 0;
+
+ err = gve_ptp_init(priv);
+ if (err)
+ return err;
+
+ priv->nic_ts_report =
+ dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ &priv->nic_ts_report_bus,
+ GFP_KERNEL);
+ if (!priv->nic_ts_report) {
+ dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__);
+ err = -ENOMEM;
+ goto release_ptp;
+ }
+
+ return 0;
+
+release_ptp:
+ gve_ptp_release(priv);
+ return err;
+}
+
+void gve_teardown_clock(struct gve_priv *priv)
+{
+ gve_ptp_release(priv);
+
+ if (priv->nic_ts_report) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 90e875c1832f..ec424d2f4f57 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -192,8 +192,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
*/
slots = rx->mask + 1;
- rx->data.page_info = kvzalloc(slots *
- sizeof(*rx->data.page_info), GFP_KERNEL);
+ rx->data.page_info = kvcalloc_node(slots, sizeof(*rx->data.page_info),
+ GFP_KERNEL, priv->numa_node);
if (!rx->data.page_info)
return -ENOMEM;
@@ -216,7 +216,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.raw_addressing) {
for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
- struct page *page = alloc_page(GFP_KERNEL);
+ struct page *page = alloc_pages_node(priv->numa_node,
+ GFP_KERNEL, 0);
if (!page) {
err = -ENOMEM;
@@ -303,10 +304,9 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
rx->qpl_copy_pool_head = 0;
- rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
- sizeof(rx->qpl_copy_pool[0]),
- GFP_KERNEL);
-
+ rx->qpl_copy_pool = kvcalloc_node(rx->qpl_copy_pool_mask + 1,
+ sizeof(rx->qpl_copy_pool[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->qpl_copy_pool) {
err = -ENOMEM;
goto abort_with_slots;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index dcb0545baa50..7380c2b7a2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -8,6 +8,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
+#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
@@ -15,6 +16,7 @@
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
@@ -148,6 +150,10 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_free_to_page_pool(rx, bs, false);
else
gve_free_qpl_page_dqo(bs);
+ if (gve_buf_state_is_allocated(rx, bs) && bs->xsk_buff) {
+ xsk_buff_free(bs->xsk_buff);
+ bs->xsk_buff = NULL;
+ }
}
if (rx->dqo.qpl) {
@@ -236,9 +242,9 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
- rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
- sizeof(rx->dqo.buf_states[0]),
- GFP_KERNEL);
+ rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
+ sizeof(rx->dqo.buf_states[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->dqo.buf_states)
return -ENOMEM;
@@ -437,6 +443,29 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
+/* Expand the hardware timestamp to its full 64-bit width and add it to the
+ * skb.
+ *
+ * The algorithm uses the passed hardware timestamp to compute a diff relative
+ * to the low 32 bits of the last read of the nic clock. This diff can be
+ * positive or negative, as the clock may have been read more recently than the
+ * hardware received this packet. To detect this we use the high bit of the
+ * diff: if it is set, the clock read is assumed to be the more recent event
+ * and the (negative) diff moves the timestamp backwards from it.
+ *
+ * Note that this means if the time delta between packet reception and the last
+ * clock read is greater than ~2 seconds, this will provide invalid results.
+ */
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+{
+ u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
+ struct sk_buff *skb = rx->ctx.skb_head;
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+}
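A short worked example of the expansion, outside the driver and with made-up values; only the low 32 bits of the last clock read participate in the diff:

/* Standalone sketch of the same sign-extension trick; values are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_hwts(uint64_t last_read, uint32_t hwts)
{
	int32_t diff = hwts - (uint32_t)last_read;	/* signed, wrap-safe diff */

	return last_read + diff;
}

int main(void)
{
	uint64_t last_read = 0x123400000100ULL;	/* last 64-bit clock read */

	/* Packet timestamped just after the last read: diff = +0x80. */
	printf("0x%llx\n", (unsigned long long)expand_hwts(last_read, 0x00000180));
	/* Packet timestamped just before the last read: diff = -0x80. */
	printf("0x%llx\n", (unsigned long long)expand_hwts(last_read, 0x00000080));
	return 0;	/* prints 0x123400000180 and 0x123400000080 */
}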
+
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
@@ -464,7 +493,7 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
u16 buf_len)
{
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_pages_node(rx->gve->numa_node, GFP_ATOMIC, 0);
int num_frags;
if (!page)
@@ -547,27 +576,146 @@ static int gve_rx_append_frags(struct napi_struct *napi,
return 0;
}
+static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp)
+{
+ struct gve_tx_ring *tx;
+ struct xdp_frame *xdpf;
+ u32 tx_qid;
+ int err;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ if (rx->xsk_pool)
+ xsk_buff_free(xdp);
+ return -ENOSPC;
+ }
+
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ err = gve_xdp_xmit_one_dqo(priv, tx, xdpf);
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ return err;
+}
+
+static void gve_xsk_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ xsk_buff_free(xdp);
+ break;
+ case XDP_TX:
+ if (unlikely(gve_xdp_tx_dqo(priv, rx, xdp)))
+ goto err;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, xprog)))
+ goto err;
+ break;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+}
+
static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
struct xdp_buff *xdp, struct bpf_prog *xprog,
int xdp_act,
struct gve_rx_buf_state_dqo *buf_state)
{
- u64_stats_update_begin(&rx->statss);
+ int err;
switch (xdp_act) {
case XDP_ABORTED:
case XDP_DROP:
default:
- rx->xdp_actions[xdp_act]++;
+ gve_free_buffer(rx, buf_state);
break;
case XDP_TX:
- rx->xdp_tx_errors++;
+ err = gve_xdp_tx_dqo(priv, rx, xdp);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
break;
case XDP_REDIRECT:
- rx->xdp_redirect_errors++;
+ err = xdp_do_redirect(priv->dev, xdp, xprog);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
break;
}
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ else if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
u64_stats_update_end(&rx->statss);
gve_free_buffer(rx, buf_state);
+ return;
+}
+
+static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+ struct bpf_prog *xprog)
+{
+ struct xdp_buff *xdp = buf_state->xsk_buff;
+ struct gve_priv *priv = rx->gve;
+ int xdp_act;
+
+ xdp->data_end = xdp->data + buf_len;
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (xprog) {
+ xdp_act = bpf_prog_run_xdp(xprog, xdp);
+ buf_len = xdp->data_end - xdp->data;
+ if (xdp_act != XDP_PASS) {
+ gve_xsk_done_dqo(priv, rx, xdp, xprog, xdp_act);
+ gve_free_buf_state(rx, buf_state);
+ return 0;
+ }
+ }
+
+ /* Copy the data to skb */
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ xdp->data, buf_len);
+ if (unlikely(!rx->ctx.skb_head)) {
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+
+ /* Free XSK buffer and Buffer state */
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+
+ /* Update Stats */
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ return 0;
}
/* Returns 0 if descriptor is completed successfully.
@@ -608,6 +756,10 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
buf_len = compl_desc->packet_len;
hdr_len = compl_desc->header_len;
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (buf_state->xsk_buff)
+ return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+
	/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
@@ -658,7 +810,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- xprog = READ_ONCE(priv->xdp_prog);
if (xprog) {
struct xdp_buff xdp;
void *old_data;
@@ -767,6 +918,9 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
if (feat & NETIF_F_RXCSUM)
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
+ if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
+ gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
@@ -786,16 +940,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
- struct napi_struct *napi = &block->napi;
- netdev_features_t feat = napi->dev->features;
-
- struct gve_rx_ring *rx = block->rx;
- struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
-
+ struct gve_rx_compl_queue_dqo *complq;
+ struct napi_struct *napi;
+ netdev_features_t feat;
+ struct gve_rx_ring *rx;
+ struct gve_priv *priv;
+ u64 xdp_redirects;
u32 work_done = 0;
u64 bytes = 0;
+ u64 xdp_txs;
int err;
+ napi = &block->napi;
+ feat = napi->dev->features;
+
+ rx = block->rx;
+ priv = rx->gve;
+ complq = &rx->dqo.complq;
+
+ xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ xdp_txs = rx->xdp_actions[XDP_TX];
+
while (work_done < budget) {
struct gve_rx_compl_desc_dqo *compl_desc =
&complq->desc_ring[complq->head];
@@ -869,6 +1034,12 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
rx->ctx.skb_tail = NULL;
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush_dqo(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
gve_rx_post_buffers_dqo(rx);
u64_stats_update_begin(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 1b40bf0c811a..c6ff0968929d 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -823,8 +823,8 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
return ndescs;
}
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags)
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_tx_ring *tx;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 9d705d94b065..6f1d515673d2 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -9,9 +9,11 @@
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
+#include <linux/bpf.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
@@ -110,6 +112,14 @@ static bool gve_has_pending_packet(struct gve_tx_ring *tx)
return false;
}
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+}
+
static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
@@ -198,7 +208,8 @@ void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->netdev_txq)
+ netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
}
@@ -231,6 +242,9 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_ring = NULL;
}
+ kvfree(tx->dqo.xsk_reorder_queue);
+ tx->dqo.xsk_reorder_queue = NULL;
+
kvfree(tx->dqo.pending_packets);
tx->dqo.pending_packets = NULL;
@@ -276,7 +290,8 @@ void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_tx_add_to_block(priv, idx);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
@@ -295,6 +310,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
tx->dev = hdev;
+ spin_lock_init(&tx->dqo_tx.xdp_lock);
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
@@ -333,6 +349,17 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+
+	/* Only alloc the XSK reorder queue for XDP queues */
+ if (idx >= cfg->qcfg->num_queues && cfg->num_xdp_rings) {
+ tx->dqo.xsk_reorder_queue =
+ kvcalloc(tx->dqo.complq_mask + 1,
+ sizeof(tx->dqo.xsk_reorder_queue[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.xsk_reorder_queue)
+ goto err;
+ }
+
tx->dqo_compl.miss_completions.head = -1;
tx->dqo_compl.miss_completions.tail = -1;
tx->dqo_compl.timed_out_completions.head = -1;
@@ -439,12 +466,28 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
return tx->mask - num_used;
}
+/* Check if the requested number of slots is available in the ring */
+static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req)
+{
+ u32 num_avail = num_avail_tx_slots(tx);
+
+ slots_req += GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP;
+
+ if (num_avail >= slots_req)
+ return true;
+
+ /* Update cached TX head pointer */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ return num_avail_tx_slots(tx) >= slots_req;
+}
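The helper above is the usual two-step availability check: a cheap test against a locally cached copy of the hardware head, with an acquire read of the completion-path counter only when the cached value looks insufficient. A minimal sketch of the pattern in plain C11 atomics, not the driver's types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint32_t mask;			/* ring size - 1 */
	uint32_t tail;			/* producer: next descriptor slot */
	uint32_t cached_head;		/* producer's possibly stale head copy */
	_Atomic uint32_t hw_head;	/* advanced by the completion path */
};

static uint32_t avail(const struct ring *r)
{
	return r->mask - ((r->tail - r->cached_head) & r->mask);
}

static bool has_slots(struct ring *r, uint32_t want)
{
	if (avail(r) >= want)
		return true;		/* fast path: no atomic access */

	/* Slow path: pick up completions published since the last refresh. */
	r->cached_head = atomic_load_explicit(&r->hw_head, memory_order_acquire);
	return avail(r) >= want;
}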
+
static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
int desc_count, int buf_count)
{
return gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= desc_count &&
- gve_has_free_tx_qpl_bufs(tx, buf_count);
+ gve_has_tx_slots_available(tx, desc_count) &&
+ gve_has_free_tx_qpl_bufs(tx, buf_count);
}
/* Stops the queue if available descriptors is less than 'count'.
@@ -456,12 +499,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
- /* Update cached TX head pointer */
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
- if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
- return 0;
-
/* No space, so stop the queue */
tx->stop_queue++;
netif_tx_stop_queue(tx->netdev_txq);
@@ -472,8 +509,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
/* After stopping queue, check if we can transmit again in order to
* avoid TOCTOU bug.
*/
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return -EBUSY;
@@ -500,11 +535,9 @@ static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
}
static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
- struct sk_buff *skb, u32 len, u64 addr,
+ bool enable_csum, u32 len, u64 addr,
s16 compl_tag, bool eop, bool is_gso)
{
- const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
-
while (len > 0) {
struct gve_tx_pkt_desc_dqo *desc =
&tx->dqo.tx_ring[*desc_idx].pkt;
@@ -515,7 +548,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
.buf_addr = cpu_to_le64(addr),
.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
.end_of_packet = cur_eop,
- .checksum_offload_enable = checksum_offload_en,
+ .checksum_offload_enable = enable_csum,
.compl_tag = cpu_to_le16(compl_tag),
.buf_size = cur_len,
};
@@ -612,6 +645,25 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
};
}
+static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx)
+{
+ u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+ u32 last_report_event_interval =
+ (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+ /* Commit the changes to our state */
+ tx->dqo_tx.tail = desc_idx;
+
+ /* Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+
+ if (unlikely(last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo_tx.last_re_idx = last_desc_idx;
+ }
+}
+
static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
struct sk_buff *skb,
struct gve_tx_pending_packet_dqo *pkt,
@@ -619,6 +671,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -644,7 +697,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag,
/*eop=*/shinfo->nr_frags == 0, is_gso);
}
@@ -664,7 +717,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag, is_eop, is_gso);
}
@@ -709,6 +762,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
u32 copy_offset = 0;
dma_addr_t dma_addr;
u32 copy_len;
@@ -730,7 +784,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
copy_offset += copy_len;
dma_sync_single_for_device(tx->dev, dma_addr,
copy_len, DMA_TO_DEVICE);
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum,
copy_len,
dma_addr,
completion_tag,
@@ -768,6 +822,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
return -ENOMEM;
pkt->skb = skb;
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_SKB;
completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
@@ -800,24 +855,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
- /* Commit the changes to our state */
- tx->dqo_tx.tail = desc_idx;
-
- /* Request a descriptor completion on the last descriptor of the
- * packet if we are allowed to by the HW enforced interval.
- */
- {
- u32 last_desc_idx = (desc_idx - 1) & tx->mask;
- u32 last_report_event_interval =
- (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
-
- if (unlikely(last_report_event_interval >=
- GVE_TX_MIN_RE_INTERVAL)) {
- tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
- tx->dqo_tx.last_re_idx = last_desc_idx;
- }
- }
-
+ gve_tx_update_tail(tx, desc_idx);
return 0;
err:
@@ -951,9 +989,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
/* Metadata + (optional TSO) + data descriptors. */
total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
- if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
- GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
- num_buffer_descs))) {
+ if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
+ num_buffer_descs))) {
return -1;
}
@@ -970,6 +1007,38 @@ drop:
return 0;
}
+static void gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring *tx,
+ u16 completion_tag)
+{
+ u32 tail = atomic_read(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ tx->dqo.xsk_reorder_queue[tail] = completion_tag;
+ tail = (tail + 1) & tx->dqo.complq_mask;
+ atomic_set_release(&tx->dqo_tx.xsk_reorder_queue_tail, tail);
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_xsk_reorder_queue_head(struct gve_tx_ring *tx)
+{
+ u32 head = tx->dqo_compl.xsk_reorder_queue_head;
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail) {
+ tx->dqo_compl.xsk_reorder_queue_tail =
+ atomic_read_acquire(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail)
+ return NULL;
+ }
+
+ return &tx->dqo.pending_packets[tx->dqo.xsk_reorder_queue[head]];
+}
+
+static void gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring *tx)
+{
+ tx->dqo_compl.xsk_reorder_queue_head++;
+ tx->dqo_compl.xsk_reorder_queue_head &= tx->dqo.complq_mask;
+}
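These three helpers implement a single-producer/single-consumer FIFO of completion tags. Hardware may complete XSK packets out of order, but entries are only released from the head of this queue once the head entry is complete, so xsk_tx_completed() is always called in submission order (see gve_tx_process_xsk_completions() further down). A standalone sketch of that in-order draining, with hypothetical names and toy values:

#include <stdbool.h>
#include <stdio.h>

#define QLEN 8			/* power of two */

static int fifo[QLEN];		/* completion tags, in submission order */
static bool done[QLEN];		/* indexed by tag: completed by "hardware" */
static unsigned int head, tail;

static void push(int tag)	{ fifo[tail++ & (QLEN - 1)] = tag; }

static unsigned int drain(void)
{
	unsigned int released = 0;

	while (head != tail && done[fifo[head & (QLEN - 1)]]) {
		head++;
		released++;
	}
	return released;	/* what would be passed to xsk_tx_completed() */
}

int main(void)
{
	push(0); push(1); push(2);
	done[1] = true;			/* out-of-order completion */
	printf("%u\n", drain());	/* 0: head entry (tag 0) not done yet */
	done[0] = true; done[2] = true;
	printf("%u\n", drain());	/* 3: all released, in submission order */
	return 0;
}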
+
/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
@@ -993,6 +1062,62 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static bool gve_xsk_tx_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xsk_buff_pool *pool = tx->xsk_pool;
+ struct xdp_desc desc;
+ bool repoll = false;
+ int sent = 0;
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (; sent < budget; sent++) {
+ struct gve_tx_pending_packet_dqo *pkt;
+ s16 completion_tag;
+ dma_addr_t addr;
+ u32 desc_idx;
+
+ if (unlikely(!gve_has_avail_slots_tx_dqo(tx, 1, 1))) {
+ repoll = true;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &desc))
+ break;
+
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XSK;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, addr, desc.len);
+
+ desc_idx = tx->dqo_tx.tail;
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ true, desc.len,
+ addr, completion_tag, true,
+ false);
+ ++pkt->num_bufs;
+ gve_tx_update_tail(tx, desc_idx);
+ tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
+ gve_xsk_reorder_queue_push_dqo(tx, completion_tag);
+ }
+
+ if (sent) {
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ xsk_tx_release(pool);
+ }
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+
+ return (sent == budget) || repoll;
+}
+
static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
struct gve_tx_pending_packet_dqo *pending_packet)
{
@@ -1107,16 +1232,35 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
}
}
tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
- if (tx->dqo.qpl)
- gve_free_tx_qpl_bufs(tx, pending_packet);
- else
+
+ switch (pending_packet->type) {
+ case GVE_TX_PENDING_PACKET_DQO_SKB:
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->skb->len;
+
+ napi_consume_skb(pending_packet->skb, is_napi);
+ pending_packet->skb = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->xdpf->len;
- *bytes += pending_packet->skb->len;
- (*pkts)++;
- napi_consume_skb(pending_packet->skb, is_napi);
- pending_packet->skb = NULL;
- gve_free_pending_packet(tx, pending_packet);
+ xdp_return_frame(pending_packet->xdpf);
+ pending_packet->xdpf = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XSK:
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
}
static void gve_handle_miss_completion(struct gve_priv *priv,
@@ -1213,8 +1357,34 @@ static void remove_timed_out_completions(struct gve_priv *priv,
remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
pending_packet);
+
+ /* Need to count XSK packets in xsk_tx_completed. */
+ if (pending_packet->type == GVE_TX_PENDING_PACKET_DQO_XSK)
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ else
+ gve_free_pending_packet(tx, pending_packet);
+ }
+}
+
+static void gve_tx_process_xsk_completions(struct gve_tx_ring *tx)
+{
+ u32 num_xsks = 0;
+
+ while (true) {
+ struct gve_tx_pending_packet_dqo *pending_packet =
+ gve_xsk_reorder_queue_head(tx);
+
+ if (!pending_packet ||
+ pending_packet->state != GVE_PACKET_STATE_XSK_COMPLETE)
+ break;
+
+ num_xsks++;
+ gve_xsk_reorder_queue_pop_dqo(tx);
gve_free_pending_packet(tx, pending_packet);
}
+
+ if (num_xsks)
+ xsk_tx_completed(tx->xsk_pool, num_xsks);
}
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -1287,13 +1457,17 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
num_descs_cleaned++;
}
- netdev_tx_completed_queue(tx->netdev_txq,
- pkt_compl_pkts + miss_compl_pkts,
- pkt_compl_bytes + miss_compl_bytes);
+ if (tx->netdev_txq)
+ netdev_tx_completed_queue(tx->netdev_txq,
+ pkt_compl_pkts + miss_compl_pkts,
+ pkt_compl_bytes + miss_compl_bytes);
remove_miss_completions(priv, tx);
remove_timed_out_completions(priv, tx);
+ if (tx->xsk_pool)
+ gve_tx_process_xsk_completions(tx);
+
u64_stats_update_begin(&tx->statss);
tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
@@ -1325,3 +1499,111 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}
+
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool)
+ return gve_xsk_tx_dqo(priv, tx, budget);
+
+ return 0;
+}
+
+bool gve_xdp_poll_dqo(struct gve_notify_block *block)
+{
+ struct gve_tx_compl_desc *compl_desc;
+ struct gve_tx_ring *tx = block->tx;
+ struct gve_priv *priv = block->priv;
+
+ gve_clean_tx_done_dqo(priv, tx, &block->napi);
+
+ /* Return true if we still have work. */
+ compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+ return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
+
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf)
+{
+ struct gve_tx_pending_packet_dqo *pkt;
+ u32 desc_idx = tx->dqo_tx.tail;
+ s16 completion_tag;
+ int num_descs = 1;
+ dma_addr_t addr;
+ int err;
+
+ if (unlikely(!gve_has_tx_slots_available(tx, num_descs)))
+ return -EBUSY;
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (unlikely(!pkt))
+ return -EBUSY;
+
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XDP_FRAME;
+ pkt->num_bufs = 0;
+ pkt->xdpf = xdpf;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ /* Generate Packet Descriptor */
+ addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ err = dma_mapping_error(tx->dev, addr);
+ if (unlikely(err))
+ goto err;
+
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], xdpf->len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ pkt->num_bufs++;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ false, xdpf->len,
+ addr, completion_tag, true,
+ false);
+
+ gve_tx_update_tail(tx, desc_idx);
+ return 0;
+
+err:
+ pkt->xdpf = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
+ return err;
+}
+
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 7725cb0c5c8a..ea09a09c451b 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -258,6 +258,7 @@ struct hbg_stats {
u64 tx_dma_err_cnt;
u64 np_link_fail_cnt;
+ u64 reset_fail_cnt;
};
struct hbg_priv {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
index f23fb5920c3c..c0ce74cf7382 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -156,6 +156,7 @@ static const struct hbg_push_stats_info hbg_push_stats_list[] = {
HBG_PUSH_STATS_I(tx_drop_cnt, 84),
HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+ HBG_PUSH_STATS_I(reset_fail_cnt, 87),
};
static int hbg_push_msg_send(struct hbg_priv *priv,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index ff3295b60a69..503cfbfb4a8a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -68,6 +68,7 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
if (ret) {
+ priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
}
@@ -88,6 +89,7 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
ret = hbg_rebuild(priv);
if (ret) {
+ priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 55520053270a..1d62ff913737 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -84,6 +84,7 @@ static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
HBG_STATS_I(tx_dma_err_cnt),
HBG_STATS_I(tx_timeout_cnt),
+ HBG_STATS_I(reset_fail_cnt),
};
static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 9b65eef62b3f..8cca8316ba40 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -18,6 +18,13 @@
#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
#define HBG_PCU_FRAME_LEN_PLUS 4
+#define HBG_FIFO_TX_FULL_THRSLD 0x3F0
+#define HBG_FIFO_TX_EMPTY_THRSLD 0x1F0
+#define HBG_FIFO_RX_FULL_THRSLD 0x240
+#define HBG_FIFO_RX_EMPTY_THRSLD 0x190
+#define HBG_CFG_FIFO_FULL_THRSLD 0x10
+#define HBG_CFG_FIFO_EMPTY_THRSLD 0x01
+
static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
@@ -168,6 +175,11 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
+	/* Setting burst_len BIT(29) to 1 can improve TX performance, but it
+	 * causes packet drops when mtu > 2000.
+	 * So clear BIT(29) when mtu > 2000.
+	 */
+ u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
u32 frame_len;
frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
@@ -175,6 +187,9 @@ void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
hbg_hw_set_pcu_max_frame_len(priv, frame_len);
hbg_hw_set_mac_max_frame_len(priv, frame_len);
+
+ hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
+ HBG_REG_BRUST_LENGTH_B, burst_len_bit);
}
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -264,6 +279,41 @@ void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}
+static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value = 0;
+
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);
+
+ if (dir & HBG_DIR_TX)
+ hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);
+
+ if (dir & HBG_DIR_RX)
+ hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
+}
+
+static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value;
+
+ value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);
+
+ if (dir & HBG_DIR_TX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
+ }
+
+ if (dir & HBG_DIR_RX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
+ }
+
+ hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
+}
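For reference, the FIELD_PREP() packing above places each threshold into its field of the shared register. Using the HBG_REG_FIFO_THRSLD_* masks added to hbg_reg.h later in this patch, the TX values chosen above combine as follows:

/* Worked example with the defines above:
 *   full  = HBG_FIFO_TX_FULL_THRSLD  = 0x3F0
 *   empty = HBG_FIFO_TX_EMPTY_THRSLD = 0x1F0
 *
 *   FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M,  0x3F0) -> 0x3F0 << 16 = 0x03F00000
 *   FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, 0x1F0) ->               0x000001F0
 *   value written to HBG_REG_TX_FIFO_THRSLD_ADDR    =               0x03F001F0
 */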
+
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
u32 ctrl = 0;
@@ -324,5 +374,12 @@ int hbg_hw_init(struct hbg_priv *priv)
hbg_hw_init_rx_control(priv);
hbg_hw_init_transmit_ctrl(priv);
+
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
+ HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
+ HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
+ hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
+ HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 42b0083c9193..8b7b476ed7fb 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -2,6 +2,7 @@
// Copyright (c) 2024 Hisilicon Limited.
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/rtnetlink.h>
#include "hbg_common.h"
#include "hbg_hw.h"
@@ -19,6 +20,7 @@
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+#define HBG_NO_PHY 0xFF
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
@@ -229,6 +231,39 @@ void hbg_phy_stop(struct hbg_priv *priv)
phy_stop(priv->mac.phydev);
}
+static void hbg_fixed_phy_uninit(void *data)
+{
+ fixed_phy_unregister((struct phy_device *)data);
+}
+
+static int hbg_fixed_phy_init(struct hbg_priv *priv)
+{
+ struct fixed_phy_status hbg_fixed_phy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .pause = 1,
+ .asym_pause = 1,
+ };
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ phydev = fixed_phy_register(&hbg_fixed_phy_status, NULL);
+ if (IS_ERR(phydev)) {
+ dev_err_probe(dev, PTR_ERR(phydev),
+ "failed to register fixed PHY device\n");
+ return PTR_ERR(phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, hbg_fixed_phy_uninit, phydev);
+ if (ret)
+ return ret;
+
+ priv->mac.phydev = phydev;
+ return hbg_phy_connect(priv);
+}
+
int hbg_mdio_init(struct hbg_priv *priv)
{
struct device *dev = &priv->pdev->dev;
@@ -238,6 +273,9 @@ int hbg_mdio_init(struct hbg_priv *priv)
int ret;
mac->phy_addr = priv->dev_specs.phy_addr;
+ if (mac->phy_addr == HBG_NO_PHY)
+ return hbg_fixed_phy_init(priv);
+
mdio_bus = devm_mdiobus_alloc(dev);
if (!mdio_bus)
return dev_err_probe(dev, -ENOMEM,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index a6e7f5e62b48..a39d1e796e4a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -141,7 +141,13 @@
/* PCU */
#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_FIFO_THRSLD_FULL_M GENMASK(25, 16)
+#define HBG_REG_FIFO_THRSLD_EMPTY_M GENMASK(9, 0)
#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M GENMASK(31, 24)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M GENMASK(23, 16)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M GENMASK(15, 8)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M GENMASK(7, 0)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -185,6 +191,8 @@
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494)
#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0)
+#define HBG_REG_BRUST_LENGTH_ADDR (HBG_REG_SGMII_BASE + 0x04C4)
+#define HBG_REG_BRUST_LENGTH_B BIT(29)
#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4)
#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0)
#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 4e44f28288f9..3b548f71fa8a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -339,6 +339,10 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+#define hnae3_seq_file_to_ae_dev(s) (dev_get_drvdata((s)->private))
+#define hnae3_seq_file_to_handle(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->handle)
+
enum hnae3_tc_map_mode {
HNAE3_TC_MAP_MODE_PRIO,
HNAE3_TC_MAP_MODE_DSCP,
@@ -434,8 +438,11 @@ struct hnae3_ae_dev {
u32 dev_version;
DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
+ struct hnae3_handle *handle;
};
+typedef int (*read_func)(struct seq_file *s, void *data);
+
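With the read_func typedef and the seq-file helper macros above, each debugfs command now supplies an ordinary seq_file show callback instead of filling a preallocated buffer. A minimal, hypothetical handler would look like this; the real handlers, e.g. hns3_dbg_coal_info() later in this patch, follow the same shape:

/* Hypothetical read_func implementation; name and output are illustrative. */
static int hns3_dbg_example_info(struct seq_file *s, void *data)
{
	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);

	seq_printf(s, "handle %p: example debugfs output\n", h);
	return 0;
}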
/* This struct defines the operation on the handle.
*
* init_ae_dev(): (mandatory)
@@ -580,8 +587,6 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
- * dbg_read_cmd
- * Execute debugfs read command.
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
@@ -594,6 +599,8 @@ struct hnae3_ae_dev {
* Get wake on lan info
* set_wol
* Config wake on lan
+ * dbg_get_read_func
+ * Return the read func for debugfs seq file
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -690,9 +697,9 @@ struct hnae3_ae_ops {
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*set_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ const struct ethtool_rxfh_fields *cmd);
int (*get_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ struct ethtool_rxfh_fields *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
@@ -748,8 +755,6 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
- int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -796,6 +801,9 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
+ int (*dbg_get_read_func)(struct hnae3_handle *handle,
+ enum hnae3_dbg_cmd cmd,
+ read_func *func);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 4ad4e8ab2f1f..37396ca4ecfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -348,7 +348,7 @@ static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
+ return head == (u32)hw->cmq.csq.next_to_use;
}
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index 4e2bb6556b1c..1eca53aaf598 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
@@ -422,7 +422,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
}
EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
-static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+static u8 hclge_comm_get_rss_hash_bits(const struct ethtool_rxfh_fields *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
@@ -448,7 +448,7 @@ static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index cdafa63fe38b..cbc02b50c6e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -108,7 +108,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
@@ -129,5 +129,5 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc);
+ const struct ethtool_rxfh_fields *nfc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 4e5d8bc39a1b..0255c8acb744 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/seq_file.h>
#include <linux/string_choices.h>
#include "hnae3.h"
@@ -40,323 +41,279 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd);
static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
{
.name = "tm_nodes",
.cmd = HNAE3_DBG_CMD_TM_NODES,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_priority",
.cmd = HNAE3_DBG_CMD_TM_PRI,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_map",
.cmd = HNAE3_DBG_CMD_TM_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_pg",
.cmd = HNAE3_DBG_CMD_TM_PG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_port",
.cmd = HNAE3_DBG_CMD_TM_PORT,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tc_sch_info",
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pause_cfg",
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pri_map",
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_dscp_map",
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dev_info",
.cmd = HNAE3_DBG_CMD_DEV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "rx_bd_queue",
.cmd = HNAE3_DBG_CMD_RX_BD,
.dentry = HNS3_DBG_DENTRY_RX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mc",
.cmd = HNAE3_DBG_CMD_MAC_MC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mng_tbl",
.cmd = HNAE3_DBG_CMD_MNG_TBL,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "loopback",
.cmd = HNAE3_DBG_CMD_LOOPBACK,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "interrupt_info",
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "reset_info",
.cmd = HNAE3_DBG_CMD_RESET_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "imp_info",
.cmd = HNAE3_DBG_CMD_IMP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncl_config",
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac_tnl_status",
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "bios_common",
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ssu",
.cmd = HNAE3_DBG_CMD_REG_SSU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "igu_egu",
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rpu",
.cmd = HNAE3_DBG_CMD_REG_RPU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncsi",
.cmd = HNAE3_DBG_CMD_REG_NCSI,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rtc",
.cmd = HNAE3_DBG_CMD_REG_RTC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ppp",
.cmd = HNAE3_DBG_CMD_REG_PPP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rcb",
.cmd = HNAE3_DBG_CMD_REG_RCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac",
.cmd = HNAE3_DBG_CMD_REG_MAC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dcb",
.cmd = HNAE3_DBG_CMD_REG_DCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "queue_map",
.cmd = HNAE3_DBG_CMD_QUEUE_MAP,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "rx_queue_info",
.cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_queue_info",
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "service_task_info",
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "vlan_config",
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ptp_info",
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "fd_counter",
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "umv_info",
.cmd = HNAE3_DBG_CMD_UMV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "page_pool_info",
.cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "coalesce_info",
.cmd = HNAE3_DBG_CMD_COAL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
};
@@ -421,71 +378,17 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
-static const struct hns3_dbg_item coal_info_items[] = {
- { "VEC_ID", 2 },
- { "ALGO_STATE", 2 },
- { "PROFILE_ID", 2 },
- { "CQE_MODE", 2 },
- { "TUNE_STATE", 2 },
- { "STEPS_LEFT", 2 },
- { "STEPS_RIGHT", 2 },
- { "TIRED", 2 },
- { "SW_GL", 2 },
- { "SW_QL", 2 },
- { "HW_GL", 2 },
- { "HW_QL", 2 },
-};
-
static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
static const char * const
dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
-static void hns3_dbg_fill_content(char *content, u16 len,
- const struct hns3_dbg_item *items,
- const char **result, u16 size)
-{
-#define HNS3_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HNS3_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HNS3_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
- char **result, int i, bool is_tx)
+ struct seq_file *s, int i, bool is_tx)
{
unsigned int gl_offset, ql_offset;
struct hns3_enet_coalesce *coal;
unsigned int reg_val;
- unsigned int j = 0;
struct dim *dim;
bool ql_enable;
@@ -503,193 +406,96 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
ql_enable = tqp_vector->rx_group.coal.ql_enable;
}
- sprintf(result[j++], "%d", i);
- sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
- dim_state_str[dim->state] : "unknown");
- sprintf(result[j++], "%u", dim->profile_ix);
- sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
- dim_cqe_mode_str[dim->mode] : "unknown");
- sprintf(result[j++], "%s",
- dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
- dim_tune_stat_str[dim->tune_state] : "unknown");
- sprintf(result[j++], "%u", dim->steps_left);
- sprintf(result[j++], "%u", dim->steps_right);
- sprintf(result[j++], "%u", dim->tired);
- sprintf(result[j++], "%u", coal->int_gl);
- sprintf(result[j++], "%u", coal->int_ql);
+ seq_printf(s, "%-8d", i);
+ seq_printf(s, "%-12s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
+ seq_printf(s, "%-12u", dim->profile_ix);
+ seq_printf(s, "%-10s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
+ seq_printf(s, "%-12s", dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
+ seq_printf(s, "%-12u%-13u%-7u%-7u%-7u", dim->steps_left,
+ dim->steps_right, dim->tired, coal->int_gl, coal->int_ql);
reg_val = readl(tqp_vector->mask_addr + gl_offset) &
HNS3_VECTOR_GL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%-7u", reg_val);
if (ql_enable) {
reg_val = readl(tqp_vector->mask_addr + ql_offset) &
HNS3_VECTOR_QL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%u\n", reg_val);
} else {
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA\n");
}
}
-static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
- int *pos, bool is_tx)
+static void hns3_dump_coal_info(struct seq_file *s, bool is_tx)
{
- char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(coal_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_enet_tqp_vector *tqp_vector;
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "%s interrupt coalesce info:\n", is_tx ? "tx" : "rx");
- *pos += scnprintf(buf + *pos, len - *pos,
- "%s interrupt coalesce info:\n",
- is_tx ? "tx" : "rx");
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- NULL, ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "VEC_ID ALGO_STATE PROFILE_ID CQE_MODE TUNE_STATE ");
+ seq_puts(s, "STEPS_LEFT STEPS_RIGHT TIRED SW_GL SW_QL ");
+ seq_puts(s, "HW_GL HW_QL\n");
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_get_coal_info(tqp_vector, result, i, is_tx);
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- (const char **)result,
- ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ hns3_get_coal_info(tqp_vector, s, i, is_tx);
}
}
-static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_coal_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dump_coal_info(h, buf, len, &pos, true);
- pos += scnprintf(buf + pos, len - pos, "\n");
- hns3_dump_coal_info(h, buf, len, &pos, false);
+ hns3_dump_coal_info(s, true);
+ seq_puts(s, "\n");
+ hns3_dump_coal_info(s, false);
return 0;
}
-static const struct hns3_dbg_item tx_spare_info_items[] = {
- { "QUEUE_ID", 2 },
- { "COPYBREAK", 2 },
- { "LEN", 7 },
- { "NTU", 4 },
- { "NTC", 4 },
- { "LTC", 4 },
- { "DMA", 17 },
-};
-
-static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
- int len, u32 ring_num, int *pos)
-{
- char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_tx_spare *tx_spare = ring->tx_spare;
- char *result[ARRAY_SIZE(tx_spare_info_items)];
- char content[HNS3_DBG_INFO_LEN];
- u32 i, j;
-
- if (!tx_spare) {
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx spare buffer is not enabled\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
- result[i] = &data_str[i][0];
-
- *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
- hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
- NULL, ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
-
- for (i = 0; i < ring_num; i++) {
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u", ring->tx_copybreak);
- sprintf(result[j++], "%u", tx_spare->len);
- sprintf(result[j++], "%u", tx_spare->next_to_use);
- sprintf(result[j++], "%u", tx_spare->next_to_clean);
- sprintf(result[j++], "%u", tx_spare->last_to_clean);
- sprintf(result[j++], "%pad", &tx_spare->dma);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_spare_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
- }
-}
-
-static const struct hns3_dbg_item rx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "BD_LEN", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "PKTNUM", 5 },
- { "COPYBREAK", 2 },
- { "RING_EN", 2 },
- { "RX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_LEN_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_HEAD_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%u", ring->rx_copybreak);
-
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG)));
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_NUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_LEN_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_TAIL_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_FBDNUM_REG));
+ seq_printf(s, "%-11u", readl_relaxed(base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-11u", ring->rx_copybreak);
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG)));
+ seq_printf(s, "%-12s", str_on_off(readl_relaxed(base +
+ HNS3_RING_RX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_rx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(rx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -697,12 +503,9 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID BD_NUM BD_LEN TAIL HEAD FBDNUM ");
+ seq_puts(s, "PKTNUM COPYBREAK RING_EN RX_RING_EN BASE_ADDR\n");
- hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
- NULL, ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -713,88 +516,51 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_rx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_queue_info(ring, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "TC", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "OFFSET", 2 },
- { "PKTNUM", 5 },
- { "RING_EN", 2 },
- { "TX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TC_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
-
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG)));
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_BD_NUM_REG));
+ seq_printf(s, "%-4u", readl_relaxed(base + HNS3_RING_TX_RING_TC_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_TAIL_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_FBDNUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_OFFSET_REG));
+ seq_printf(s, "%-11u",
+ readl_relaxed(base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG)));
+ seq_printf(s, "%-12s",
+ str_on_off(readl_relaxed(base +
+ HNS3_RING_TX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_tx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -802,12 +568,8 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
- result[i] = &data_str[i][0];
-
- hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
- NULL, ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "QUEUE_ID BD_NUM TC TAIL HEAD FBDNUM OFFSET ");
+ seq_puts(s, "PKTNUM RING_EN TX_RING_EN BASE_ADDR\n");
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
@@ -819,338 +581,213 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[i];
- hns3_dump_tx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_queue_info(ring, s, i);
}
- hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
-
return 0;
}
-static const struct hns3_dbg_item queue_map_items[] = {
- { "local_queue_id", 2 },
- { "global_queue_id", 2 },
- { "vector_id", 2 },
-};
-
-static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_queue_map(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(queue_map_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
- int pos = 0;
- int j;
u32 i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
- for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "local_queue_id global_queue_id vector_id\n");
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- NULL, ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u",
- h->ae_algo->ops->get_global_queue_id(h, i));
- sprintf(result[j++], "%d",
- priv->ring[i].tqp_vector->vector_irq);
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- (const char **)result,
- ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%-16u%-17u%d\n", i,
+ h->ae_algo->ops->get_global_queue_id(h, i),
+ priv->ring[i].tqp_vector->vector_irq);
}
return 0;
}
-static const struct hns3_dbg_item rx_bd_info_items[] = {
- { "BD_IDX", 3 },
- { "L234_INFO", 2 },
- { "PKT_LEN", 3 },
- { "SIZE", 4 },
- { "RSS_HASH", 4 },
- { "FD_ID", 2 },
- { "VLAN_TAG", 2 },
- { "O_DM_VLAN_ID_FB", 2 },
- { "OT_VLAN_TAG", 2 },
- { "BD_BASE_INFO", 2 },
- { "PTYPE", 2 },
- { "HW_CSUM", 2 },
-};
-
static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
- struct hns3_desc *desc, char **result, int idx)
+ struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ seq_printf(s, "%-9d%#-11x%-10u%-8u%#-12x%-7u%-10u%-17u%-13u%#-14x",
+ idx, le32_to_cpu(desc->rx.l234_info),
+ le16_to_cpu(desc->rx.pkt_len), le16_to_cpu(desc->rx.size),
+ le32_to_cpu(desc->rx.rss_hash), le16_to_cpu(desc->rx.fd_id),
+ le16_to_cpu(desc->rx.vlan_tag),
+ le16_to_cpu(desc->rx.o_dm_vlan_id_fb),
+ le16_to_cpu(desc->rx.ot_vlan_tag),
+ le32_to_cpu(desc->rx.bd_base_info));
+
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
- sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
- HNS3_RXD_PTYPE_M,
- HNS3_RXD_PTYPE_S));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ seq_printf(s, "%-7lu%-9u\n",
+ hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S),
+ le16_to_cpu(desc->csum));
} else {
- sprintf(result[j++], "NA");
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA NA\n");
}
}
-static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_rx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(rx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u rx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX L234_INFO PKT_LEN SIZE ");
+ seq_puts(s, "RSS_HASH FD_ID VLAN_TAG O_DM_VLAN_ID_FB ");
+ seq_puts(s, "OT_VLAN_TAG BD_BASE_INFO PTYPE HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u rx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
- NULL, ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_rx_bd_info(priv, desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_bd_info_items, (const char **)result,
- ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_bd_info(priv, desc, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 2 },
- { "ADDRESS", 13 },
- { "VLAN_TAG", 2 },
- { "SIZE", 2 },
- { "T_CS_VLAN_TSO", 2 },
- { "OT_VLAN_TAG", 3 },
- { "TV", 5 },
- { "OLT_VLAN_LEN", 2 },
- { "PAYLEN_OL4CS", 2 },
- { "BD_FE_SC_VLD", 2 },
- { "MSS_HW_CSUM", 0 },
-};
-
-static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
+static void hns3_dump_tx_bd_info(struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
- sprintf(result[j++], "%#x",
- le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%u",
- le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
- sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
+ seq_printf(s, "%-8d%#-20llx%-10u%-6u%#-15x%-14u%-7u%-16u%#-14x%#-14x%-11u\n",
+ idx, le64_to_cpu(desc->addr),
+ le16_to_cpu(desc->tx.vlan_tag),
+ le16_to_cpu(desc->tx.send_size),
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len),
+ le16_to_cpu(desc->tx.outer_vlan_tag),
+ le16_to_cpu(desc->tx.tv),
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec),
+ le32_to_cpu(desc->tx.paylen_ol4cs),
+ le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri),
+ le16_to_cpu(desc->tx.mss_hw_csum));
}
-static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_tx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(tx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u tx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX ADDRESS VLAN_TAG SIZE ");
+ seq_puts(s, "T_CS_VLAN_TSO OT_VLAN_TAG TV OLT_VLAN_LEN ");
+ seq_puts(s, "PAYLEN_OL4CS BD_FE_SC_VLD MSS_HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u tx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
- NULL, ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid];
+ ring = &priv->ring[data->qid];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_tx_bd_info(desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_bd_info_items, (const char **)result,
- ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_bd_info(desc, s, i);
}
return 0;
}
-static void
-hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_caps(struct hnae3_handle *h, struct seq_file *s)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
unsigned long *caps = ae_dev->caps;
u32 i, state;
- *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+ seq_puts(s, "dev capability:\n");
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str_yes_no(state));
+ seq_printf(s, "%s: %s\n", hns3_dbg_cap[i].name,
+ str_yes_no(state));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
-static void
-hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_specs(struct hnae3_handle *h, struct seq_file *s)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
struct net_device *dev = kinfo->netdev;
- *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
- *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
- dev_specs->mac_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
- dev_specs->mng_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
- dev_specs->max_non_tso_bd_num);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
- dev_specs->rss_ind_tbl_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
- dev_specs->rss_key_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
- kinfo->rss_size);
- *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
- kinfo->req_rss_size);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Task queue pairs numbers: %u\n",
- kinfo->num_tqps);
- *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
- kinfo->rx_buf_len);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
- kinfo->num_tx_desc);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
- kinfo->num_rx_desc);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Total number of enabled TCs: %u\n",
- kinfo->tc_info.num_tc);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
- dev_specs->int_ql_max);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
- dev_specs->max_int_gl);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
- dev_specs->max_tm_rate);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
- dev_specs->max_qset_num);
- *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
- dev_specs->umv_size);
- *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
- dev_specs->mc_mac_size);
- *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
- dev_specs->mac_stats_num);
- *pos += scnprintf(buf + *pos, len - *pos,
- "TX timeout threshold: %d seconds\n",
- dev->watchdog_timeo / HZ);
- *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
- dev_specs->hilink_version);
+ seq_puts(s, "dev_spec:\n");
+ seq_printf(s, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ seq_printf(s, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ seq_printf(s, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ seq_printf(s, "RSS ind tbl size: %u\n", dev_specs->rss_ind_tbl_size);
+ seq_printf(s, "RSS key size: %u\n", dev_specs->rss_key_size);
+ seq_printf(s, "RSS size: %u\n", kinfo->rss_size);
+ seq_printf(s, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ seq_printf(s, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ seq_printf(s, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ seq_printf(s, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ seq_printf(s, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ seq_printf(s, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ seq_printf(s, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ seq_printf(s, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ seq_printf(s, "MAX TM RATE: %u\n", dev_specs->max_tm_rate);
+ seq_printf(s, "MAX QSET number: %u\n", dev_specs->max_qset_num);
+ seq_printf(s, "umv size: %u\n", dev_specs->umv_size);
+ seq_printf(s, "mc mac size: %u\n", dev_specs->mc_mac_size);
+ seq_printf(s, "MAC statistics number: %u\n", dev_specs->mac_stats_num);
+ seq_printf(s, "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
+ seq_printf(s, "mac tunnel number: %u\n", dev_specs->tnl_num);
+ seq_printf(s, "Hilink Version: %u\n", dev_specs->hilink_version);
}
-static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_dev_info(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
- hns3_dbg_dev_caps(h, buf, len, &pos);
-
- hns3_dbg_dev_specs(h, buf, len, &pos);
+ hns3_dbg_dev_caps(h, s);
+ hns3_dbg_dev_specs(h, s);
return 0;
}
-static const struct hns3_dbg_item page_pool_info_items[] = {
- { "QUEUE_ID", 2 },
- { "ALLOCATE_CNT", 2 },
- { "FREE_CNT", 6 },
- { "POOL_SIZE(PAGE_NUM)", 2 },
- { "ORDER", 2 },
- { "NUMA_ID", 2 },
- { "MAX_LEN", 2 },
-};
-
static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
- char **result, u32 index)
+ struct seq_file *s, u32 index)
{
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u",
- READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%d",
- atomic_read(&ring->page_pool->pages_state_release_cnt));
- sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
- sprintf(result[j++], "%u", ring->page_pool->p.order);
- sprintf(result[j++], "%d", ring->page_pool->p.nid);
- sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+ seq_printf(s, "%-10u%-14u%-14d%-21u%-7u%-9d%uK\n",
+ index,
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt),
+ atomic_read(&ring->page_pool->pages_state_release_cnt),
+ ring->page_pool->p.pool_size,
+ ring->page_pool->p.order,
+ ring->page_pool->p.nid,
+ ring->page_pool->p.max_len / 1024);
}
-static int
-hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_page_pool_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -1163,162 +800,44 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID ALLOCATE_CNT FREE_CNT ");
+ seq_puts(s, "POOL_SIZE(PAGE_NUM) ORDER NUMA_ID MAX_LEN\n");
- hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
- NULL, ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
+
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_page_pool_info(ring, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- page_pool_info_items,
- (const char **)result,
- ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_page_pool_info(ring, s, i);
}
return 0;
}
-static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
-{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
- if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
- *index = i;
- return 0;
- }
- }
-
- dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
- dbg_data->cmd);
- return -EINVAL;
-}
-
-static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
- {
- .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
- .dbg_dump = hns3_dbg_queue_map,
- },
- {
- .cmd = HNAE3_DBG_CMD_DEV_INFO,
- .dbg_dump = hns3_dbg_dev_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_BD,
- .dbg_dump_bd = hns3_dbg_tx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_BD,
- .dbg_dump_bd = hns3_dbg_rx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_rx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_tx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
- .dbg_dump = hns3_dbg_page_pool_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_COAL_INFO,
- .dbg_dump = hns3_dbg_coal_info,
- },
-};
-
-static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
-{
- const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
- const struct hns3_dbg_func *cmd_func;
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
- if (cmd == hns3_dbg_cmd_func[i].cmd) {
- cmd_func = &hns3_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(dbg_data->handle, buf,
- len);
- else
- return cmd_func->dbg_dump_bd(dbg_data, buf,
- len);
- }
- }
-
- if (!ops->dbg_read_cmd)
- return -EOPNOTSUPP;
-
- return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
-}
-
-static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- char *buf = filp->private_data;
-
- return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
-}
-
-static int hns3_dbg_open(struct inode *inode, struct file *filp)
+static int hns3_dbg_bd_info_show(struct seq_file *s, void *private)
{
- struct hns3_dbg_data *dbg_data = inode->i_private;
- struct hnae3_handle *handle = dbg_data->handle;
- struct hns3_nic_priv *priv = handle->priv;
- u32 index;
- char *buf;
- int ret;
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EBUSY;
- ret = hns3_dbg_get_cmd_index(dbg_data, &index);
- if (ret)
- return ret;
-
- buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- buf, hns3_dbg_cmd[index].buf_len);
- if (ret) {
- kvfree(buf);
- return ret;
- }
-
- filp->private_data = buf;
- return 0;
-}
+ if (data->cmd == HNAE3_DBG_CMD_TX_BD)
+ return hns3_dbg_tx_bd_info(s, private);
+ else if (data->cmd == HNAE3_DBG_CMD_RX_BD)
+ return hns3_dbg_rx_bd_info(s, private);
-static int hns3_dbg_release(struct inode *inode, struct file *filp)
-{
- kvfree(filp->private_data);
- filp->private_data = NULL;
- return 0;
+ return -EOPNOTSUPP;
}
-
-static const struct file_operations hns3_dbg_fops = {
- .owner = THIS_MODULE,
- .open = hns3_dbg_open,
- .read = hns3_dbg_read,
- .release = hns3_dbg_release,
-};
+DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info);
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
{
- struct dentry *entry_dir;
struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
u16 max_queue_num;
unsigned int i;
@@ -1337,34 +856,73 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
data[i].qid = i;
sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
debugfs_create_file(name, 0400, entry_dir, &data[i],
- &hns3_dbg_fops);
+ &hns3_dbg_bd_info_fops);
}
return 0;
}
-static int
-hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd)
{
- struct hns3_dbg_data *data;
+ struct device *dev = &handle->pdev->dev;
struct dentry *entry_dir;
+ read_func func = NULL;
+
+ switch (hns3_dbg_cmd[cmd].cmd) {
+ case HNAE3_DBG_CMD_TX_QUEUE_INFO:
+ func = hns3_dbg_tx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_RX_QUEUE_INFO:
+ func = hns3_dbg_rx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_QUEUE_MAP:
+ func = hns3_dbg_queue_map;
+ break;
+ case HNAE3_DBG_CMD_PAGE_POOL_INFO:
+ func = hns3_dbg_page_pool_info;
+ break;
+ case HNAE3_DBG_CMD_COAL_INFO:
+ func = hns3_dbg_coal_info;
+ break;
+ case HNAE3_DBG_CMD_DEV_INFO:
+ func = hns3_dbg_dev_info;
+ break;
+ default:
+ return -EINVAL;
+ }
- data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
+
+ return 0;
+}
+
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd)
+{
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct device *dev = &handle->pdev->dev;
+ struct dentry *entry_dir;
+ read_func func;
+ int ret;
+
+ if (!ops->dbg_get_read_func)
+ return 0;
+
+ ret = ops->dbg_get_read_func(handle, hns3_dbg_cmd[cmd].cmd, &func);
+ if (ret)
+ return ret;
- data->handle = handle;
- data->cmd = hns3_dbg_cmd[cmd].cmd;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
- debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
- data, &hns3_dbg_fops);
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
return 0;
}
int hns3_dbg_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
const char *name = pci_name(handle->pdev);
int ret;
u32 i;
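For readers unfamiliar with the target pattern, here is a minimal, self-contained sketch of the generic debugfs/seq_file idiom the hunks above converge on (illustrative only; every demo_* identifier is hypothetical and not an hns3 symbol). The seq_file core sizes and grows the output buffer on demand, which is what lets the fixed buf_len constants, the scnprintf() position tracking, and the custom open/read/release file_operations be dropped.

/*
 * Minimal sketch of the debugfs + seq_file pattern (not hns3 driver code;
 * all demo_* names are invented for illustration).
 */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct demo_priv {
	unsigned int queue_num;		/* hypothetical driver state */
};

/* One show() callback per file; seq_printf() grows the buffer as needed. */
static int demo_queue_info_show(struct seq_file *s, void *data)
{
	struct demo_priv *priv = s->private;	/* from inode->i_private */
	unsigned int i;

	seq_puts(s, "QUEUE_ID  STATE\n");
	for (i = 0; i < priv->queue_num; i++)
		seq_printf(s, "%-10uactive\n", i);
	return 0;
}
/* Generates demo_queue_info_fops built on single_open()/seq_read(). */
DEFINE_SHOW_ATTRIBUTE(demo_queue_info);

static int demo_dev_info_show(struct seq_file *s, void *data)
{
	seq_puts(s, "device-managed seq_file: removed with the device\n");
	return 0;
}

static void demo_debugfs_init(struct device *dev, struct dentry *root,
			      struct demo_priv *priv)
{
	/* Per-object file: priv reaches the show() callback via i_private. */
	debugfs_create_file("queue_info", 0400, root, priv,
			    &demo_queue_info_fops);
	/* Device-managed variant: no explicit file_operations needed. */
	debugfs_create_devm_seqfile(dev, "dev_info", root,
				    demo_dev_info_show);
}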
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 4a5ef8a90a10..57c9d3fc1b27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -6,15 +6,6 @@
#include "hnae3.h"
-#define HNS3_DBG_READ_LEN 65536
-#define HNS3_DBG_READ_LEN_128KB 0x20000
-#define HNS3_DBG_READ_LEN_1MB 0x100000
-#define HNS3_DBG_READ_LEN_4MB 0x400000
-#define HNS3_DBG_READ_LEN_5MB 0x500000
-#define HNS3_DBG_WRITE_LEN 1024
-
-#define HNS3_DBG_DATA_STR_LEN 32
-#define HNS3_DBG_INFO_LEN 256
#define HNS3_DBG_ITEM_NAME_LEN 32
#define HNS3_DBG_FILE_NAME_LEN 16
@@ -49,16 +40,9 @@ struct hns3_dbg_cmd_info {
const char *name;
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
- u32 buf_len;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
-struct hns3_dbg_func {
- enum hnae3_dbg_cmd cmd;
- int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
- int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
-};
-
struct hns3_dbg_cap_info {
const char *name;
enum HNAE3_DEV_CAP_BITS cap_bit;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b03b8758c777..bfa5568baa92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -547,9 +548,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -960,7 +961,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -1304,7 +1308,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1504,7 +1508,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1690,8 +1694,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1863,7 +1867,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1891,7 +1895,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2106,7 +2110,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
@@ -2122,7 +2126,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
@@ -2207,9 +2211,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2447,7 +2451,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2544,7 +2548,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2770,7 +2774,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
@@ -2851,7 +2855,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3821,7 +3825,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
@@ -4747,7 +4751,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5226,7 +5255,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5264,7 +5293,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5299,6 +5328,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
struct net_device *netdev;
int ret;
+ ae_dev->handle = handle;
+
handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
&max_rss_size);
netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
@@ -5311,6 +5342,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -5934,7 +5967,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -5961,8 +5994,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d36c4ed16d8d..933e3527ed82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
+ u32 min_tx_copybreak;
+ u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
@@ -621,7 +623,7 @@ struct hns3_reset_type_map {
enum hnae3_reset_type rst_type;
};
-static inline int ring_space(struct hns3_enet_ring *ring)
+static inline u32 ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
@@ -692,7 +694,7 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; (pos); pos = (pos)->next)
+ for ((pos) = (head).ring; (pos); (pos) = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6715222aeb66..d5454e126c85 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -86,7 +86,7 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -171,7 +171,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
@@ -436,7 +436,7 @@ static void hns3_self_test(struct net_device *ndev,
data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
+ netdev_err(ndev, "dev resetting!\n");
goto failure;
}
@@ -489,7 +489,7 @@ static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
if (!ops->get_sset_count)
return -EOPNOTSUPP;
@@ -540,8 +540,8 @@ static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- int i;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
+ u32 i;
if (!ops->get_strings)
return;
@@ -569,7 +569,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
- int i, j;
+ u32 i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -692,7 +692,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return;
@@ -706,7 +706,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return -EOPNOTSUPP;
@@ -725,7 +725,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
@@ -751,7 +751,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -794,7 +794,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
default:
- netdev_warn(netdev, "Unknown media type");
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
@@ -814,7 +814,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u32 lane_num;
@@ -842,7 +842,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
- "only copper port supports half duplex!");
+ "only copper port supports half duplex!\n");
return -EINVAL;
}
@@ -861,8 +861,8 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
int ret;
/* Chip don't support this mode. */
@@ -932,7 +932,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
@@ -954,7 +954,7 @@ static int hns3_set_rss(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -978,6 +978,16 @@ static int hns3_set_rss(struct net_device *netdev,
rxfh->hfunc);
}
+static int hns3_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -988,10 +998,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
return 0;
- case ETHTOOL_GRXFH:
- if (h->ae_algo->ops->get_rss_tuple)
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -1024,8 +1030,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
{
enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
const struct hns3_reset_type_map *rst_type_map;
enum ethtool_reset_flags rst_flags;
u32 i, size;
@@ -1189,7 +1195,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
@@ -1275,15 +1281,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
return ret;
}
+static int hns3_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- if (h->ae_algo->ops->set_rss_tuple)
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
@@ -1300,7 +1313,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct phy_device *phy = netdev->phydev;
int autoneg;
@@ -1308,7 +1321,7 @@ static int hns3_nway_reset(struct net_device *netdev)
return 0;
if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ netdev_err(netdev, "dev resetting!\n");
return -EBUSY;
}
@@ -1377,7 +1390,7 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
@@ -1449,7 +1462,7 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
@@ -1473,7 +1486,7 @@ hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
@@ -1649,8 +1662,8 @@ static void hns3_get_fec_stats(struct net_device *netdev,
struct ethtool_fec_stats *fec_stats)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
return;
@@ -1700,8 +1713,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 fec_ability;
u8 fec_mode;
@@ -1725,8 +1738,8 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u32 fec_mode;
if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
@@ -1747,8 +1760,8 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct hns3_sfp_type sfp_type;
int ret;
@@ -1797,8 +1810,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
@@ -1924,7 +1937,7 @@ static int hns3_set_tunable(struct net_device *netdev,
int i, ret = 0;
if (hns3_nic_resetting(netdev) || !priv->ring) {
- netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ netdev_err(netdev, "failed to set tunable value, dev resetting!\n");
return -EBUSY;
}
@@ -2105,6 +2118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
@@ -2142,6 +2157,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
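
The ethtool changes above follow the kernel-wide split of RSS hash-field configuration out of the rxnfc handlers: ETHTOOL_GRXFH/ETHTOOL_SRXFH handling moves from get_rxnfc()/set_rxnfc() into the dedicated .get_rxfh_fields/.set_rxfh_fields ethtool_ops callbacks. A minimal sketch of the pattern with a hypothetical foo driver (foo_priv and the foo_hw_* helpers are assumptions, and the get callback is assumed to take no extack, mirroring the set variant shown above):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_rxfh_fields(struct net_device *ndev,
			       struct ethtool_rxfh_fields *cmd)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* Report the RXH_* hash-field bits active for cmd->flow_type. */
	return foo_hw_get_rss_tuple(priv, cmd);
}

static int foo_set_rxfh_fields(struct net_device *ndev,
			       const struct ethtool_rxfh_fields *cmd,
			       struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(ndev);

	return foo_hw_set_rss_tuple(priv, cmd);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rxfh_fields	= foo_get_rxfh_fields,
	.set_rxfh_fields	= foo_set_rxfh_fields,
	/* ... */
};
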
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index c46490693594..b76d25074e99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -12,6 +12,9 @@
#include "hclge_tm.h"
#include "hnae3.h"
+#define hclge_seq_file_to_hdev(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->priv)
+
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
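
From here on the hclge debugfs dumpers drop the buf/len/pos plumbing and print straight into a struct seq_file; the macro above recovers the hclge_dev from the seq_file through the hnae3 layer. The generic kernel mechanism this builds on is sketched below with hypothetical names (struct foo_dev, foo_dbg_mac_show); the real hnae3 wiring differs, which is exactly why the hclge_seq_file_to_hdev() helper exists:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_dev {			/* hypothetical device context */
	u32 mac_en;
};

/* The show callback finds the pointer passed to debugfs_create_file()
 * in s->private and prints with seq_printf()/seq_puts(); the seq_file
 * core handles buffer sizing and partial reads, so no explicit length
 * tracking is needed.
 */
static int foo_dbg_mac_show(struct seq_file *s, void *data)
{
	struct foo_dev *fdev = s->private;

	seq_printf(s, "mac_en: %u\n", fdev->mac_en);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_dbg_mac);

static void foo_dbg_init(struct foo_dev *fdev, struct dentry *root)
{
	debugfs_create_file("mac_info", 0400, root, fdev,
			    &foo_dbg_mac_fops);
}
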
@@ -721,48 +724,6 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-/* make sure: len(name) + interval >= maxlen(item data) + 2,
- * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
- * and print as "%u"(maxlen: 10), so the interval should be at least 5.
- */
-static void hclge_dbg_fill_content(char *content, u16 len,
- const struct hclge_dbg_item *items,
- const char **result, u16 size)
-{
-#define HCLGE_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HCLGE_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HCLGE_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
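
The removed hclge_dbg_fill_content() built fixed-width table rows by copying each cell into a pre-spaced line buffer. With seq_file output the converted dumpers below get the same alignment from printf field widths (for example "%#-16x" prints a 0x-prefixed value left-justified in a 16-character column). A small illustration of that technique, using a made-up table:

#include <linux/kernel.h>
#include <linux/seq_file.h>

/* Illustrative only: left-justified, fixed-width conversions keep the
 * header and the data rows aligned without a separate fill helper.
 */
static void foo_dump_table(struct seq_file *s)
{
	static const struct { u32 id; u32 mask; u32 status; } rows[] = {
		{ 0, 0x1, 0x3 },
		{ 1, 0xf, 0x0 },
	};
	unsigned int i;

	seq_puts(s, "id   mask            status\n");
	for (i = 0; i < ARRAY_SIZE(rows); i++)
		seq_printf(s, "%-5u%#-16x%#x\n",
			   rows[i].id, rows[i].mask, rows[i].status);
}
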
@@ -826,14 +787,14 @@ int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
static int
hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ u32 index, entry, i, cnt, min_num;
struct hclge_desc *desc_src;
- u32 index, entry, i, cnt;
- int bd_num, min_num, ret;
struct hclge_desc *desc;
+ int bd_num, ret;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -846,13 +807,12 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
- cnt++, dfx_message->message);
+ seq_printf(s, "item%u = %s\n", cnt++, dfx_message->message);
for (i = 0; i < cnt; i++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+ seq_printf(s, "item%u\t", i);
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
dfx_message = reg_info->dfx_msg;
@@ -867,10 +827,9 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
if (i > 0 && !entry)
desc++;
- *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%#x\t", le32_to_cpu(desc->data[entry]));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
kfree(desc_src);
@@ -880,14 +839,14 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
static int
hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
struct hclge_desc *desc_src;
- int bd_num, min_num, ret;
+ int bd_num, min_num, ret, i;
struct hclge_desc *desc;
- u32 entry, i;
+ u32 entry;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -914,9 +873,8 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (!dfx_message->flag)
continue;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%s: %#x\n", dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
@@ -940,8 +898,8 @@ static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};
-static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -962,16 +920,15 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
offset = hclge_dbg_mac_en_status[i].offset;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- hclge_dbg_mac_en_status[i].message,
- hnae3_get_bit(loop_en, offset));
+ seq_printf(s, "%s: %#x\n", hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
}
return 0;
}
-static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
@@ -988,16 +945,14 @@ static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
- le16_to_cpu(req->max_frm_size));
- *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
- req->min_frm_size);
+ seq_printf(s, "max_frame_size: %u\n", le16_to_cpu(req->max_frm_size));
+ seq_printf(s, "min_frame_size: %u\n", req->min_frm_size);
return 0;
}
-static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev,
+ struct seq_file *s)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
@@ -1018,33 +973,31 @@ static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
- hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
- HCLGE_MAC_SPEED_SHIFT));
- *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
- hnae3_get_bit(req->speed_dup,
- HCLGE_MAC_DUPLEX_SHIFT));
+ seq_printf(s, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ seq_printf(s, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
return 0;
}
-static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_enable_status(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_frame_size(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+ return hclge_dbg_dump_mac_speed_duplex(hdev, s);
}
-static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1055,8 +1008,8 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ seq_puts(s, "qset_id roce_qset_mask nic_qset_mask ");
+ seq_puts(s, "qset_shaping_pass qset_bp_status\n");
for (qset_id = 0; qset_id < qset_num; qset_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
HCLGE_OPC_QSET_DFX_STS);
@@ -1065,17 +1018,14 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%04u %#x %#x %#x %#x\n",
- qset_id, req.bit0, req.bit1, req.bit2,
- req.bit3);
+ seq_printf(s, "%04u %#-16x%#-15x%#-19x%#-x\n",
+ qset_id, req.bit0, req.bit1, req.bit2, req.bit3);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1086,8 +1036,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ seq_puts(s, "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
for (pri_id = 0; pri_id < pri_num; pri_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
HCLGE_OPC_PRI_DFX_STS);
@@ -1096,24 +1045,21 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pri_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-10x%#-19x%#-x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ seq_puts(s, "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
HCLGE_OPC_PG_DFX_STS);
@@ -1122,47 +1068,41 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pg_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-9x%#-18x%#-x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc;
u16 nq_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ seq_puts(s, "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_NQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
- nq_id, le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%04u %#-19x",
+ nq_id, le32_to_cpu(desc.data[1]));
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- " %#x\n",
- le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%#-x\n", le32_to_cpu(desc.data[1]));
}
return 0;
}
-static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1176,16 +1116,13 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- req.bit0);
- *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- req.bit1);
+ seq_printf(s, "port_mask: %#x\n", req.bit0);
+ seq_printf(s, "port_shaping_pass: %#x\n", req.bit1);
return 0;
}
-static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc[2];
u8 port_id = 0;
@@ -1196,32 +1133,23 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
- le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "SCH_NIC_NUM: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "SCH_ROCE_NUM: %#x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "sch_roce_fifo_afull_gap: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx_private_waterline: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
- le32_to_cpu(desc[0].data[5]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
- le32_to_cpu(desc[1].data[0]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
- le32_to_cpu(desc[1].data[1]));
+ seq_printf(s, "pri_bp: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "fifo_dfx_info: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "tm_bypass_en: %#x\n", le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "SSU_TM_BYPASS_EN: %#x\n", le32_to_cpu(desc[1].data[0]));
+ seq_printf(s, "SSU_RESERVE_CFG: %#x\n", le32_to_cpu(desc[1].data[1]));
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
@@ -1231,65 +1159,60 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "TC_MAP_SEL: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "IGU_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "MAC_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
return 0;
}
-static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_dcb(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_qset(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pri(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pg(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_queue(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_port(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+ return hclge_dbg_dump_dcb_tm(hdev, s);
}
-static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
+static int hclge_dbg_dump_reg_cmd(enum hnae3_dbg_cmd cmd, struct seq_file *s)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
const struct hclge_dbg_reg_type_info *reg_info;
- int pos = 0, ret = 0;
- int i;
+ int ret = 0;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
if (cmd == reg_info->cmd) {
if (cmd == HNAE3_DBG_CMD_REG_TQP)
- return hclge_dbg_dump_reg_tqp(hdev, reg_info,
- buf, len, &pos);
+ return hclge_dbg_dump_reg_tqp(hdev,
+ reg_info, s);
- ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
- len, &pos);
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, s);
if (ret)
break;
}
@@ -1298,12 +1221,57 @@ static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
return ret;
}
-static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_bios_reg_cmd(struct seq_file *s, void *data)
{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
+}
+
+static int hclge_dbg_dump_ssu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_SSU, s);
+}
+
+static int hclge_dbg_dump_igu_egu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_IGU_EGU, s);
+}
+
+static int hclge_dbg_dump_rpu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RPU, s);
+}
+
+static int hclge_dbg_dump_ncsi_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_NCSI, s);
+}
+
+static int hclge_dbg_dump_rtc_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RTC, s);
+}
+
+static int hclge_dbg_dump_ppp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_PPP, s);
+}
+
+static int hclge_dbg_dump_rcb_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RCB, s);
+}
+
+static int hclge_dbg_dump_tqp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_TQP, s);
+}
+
+static int hclge_dbg_dump_tc(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ets_tc_weight_cmd *ets_weight;
+ const char *sch_mode_str;
struct hclge_desc desc;
- char *sch_mode_str;
- int pos = 0;
int ret;
u8 i;
@@ -1323,72 +1291,37 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
- hdev->tm_info.num_tc);
- pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ seq_printf(s, "enabled tc number: %u\n", hdev->tm_info.num_tc);
+ seq_printf(s, "weight_offset: %u\n", ets_weight->weight_offset);
- pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ seq_puts(s, "TC MODE WEIGHT\n");
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
- pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
- i, sch_mode_str, ets_weight->tc_weight[i]);
+ seq_printf(s, "%u %4s %3u\n", i, sch_mode_str,
+ ets_weight->tc_weight[i]);
}
return 0;
}
-static const struct hclge_dbg_item tm_pg_items[] = {
- { "ID", 2 },
- { "PRI_MAP", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
- char **result, u8 *index)
+static void hclge_dbg_fill_shaper_content(struct seq_file *s,
+ struct hclge_tm_shaper_para *para)
{
- sprintf(result[(*index)++], "%3u", para->ir_b);
- sprintf(result[(*index)++], "%3u", para->ir_u);
- sprintf(result[(*index)++], "%3u", para->ir_s);
- sprintf(result[(*index)++], "%3u", para->bs_b);
- sprintf(result[(*index)++], "%3u", para->bs_s);
- sprintf(result[(*index)++], "%3u", para->flag);
- sprintf(result[(*index)++], "%6u", para->rate);
+ seq_printf(s, "%-8u%-8u%-8u%-8u%-8u%-8u%-14u", para->ir_b, para->ir_u,
+ para->ir_s, para->bs_b, para->bs_s, para->flag, para->rate);
}
-static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
- char *buf, int len)
+static int hclge_dbg_dump_tm_pg(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
- u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
- char content[HCLGE_DBG_TM_INFO_LEN];
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pg_id, sch_mode, weight, pri_bit_map;
+ const char *sch_mode_str;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
- result[i] = data_str;
- data_str += HCLGE_DBG_DATA_STR_LEN;
- }
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- NULL, ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "ID PRI_MAP MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
@@ -1418,68 +1351,41 @@ static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%02u", pg_id);
- sprintf(result[j++], "0x%02x", pri_bit_map);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- (const char **)result,
- ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%02u 0x%-7x%-6s%-6u", pg_id, pri_bit_map,
+ sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
-{
- char *data_str;
- int ret;
-
- data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
- HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
-
- kfree(data_str);
-
- return ret;
-}
-
-static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_port(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_shaper_para shaper_para;
- int pos = 0;
int ret;
ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %3u %3u %3u %1u %6u\n",
- shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
- shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
- shaper_para.rate);
+ seq_puts(s, "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ seq_printf(s, "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
return 0;
}
static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
- char *buf, int len)
+ struct seq_file *s)
{
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_bp_to_qs_map_cmd *map;
struct hclge_desc desc;
- int pos = 0;
u8 group_id;
u8 grp_num;
u16 i = 0;
@@ -1505,27 +1411,27 @@ static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
}
- pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ seq_puts(s, "INDEX | TM BP QSET MAPPING:\n");
for (group_id = 0; group_id < grp_num / 8; group_id++) {
- pos += scnprintf(buf + pos, len - pos,
- "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_mapping[i + 7],
- qset_mapping[i + 6], qset_mapping[i + 5],
- qset_mapping[i + 4], qset_mapping[i + 3],
- qset_mapping[i + 2], qset_mapping[i + 1],
- qset_mapping[i]);
+ seq_printf(s,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
i += 8;
}
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_map(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u16 queue_id;
u16 qset_id;
u8 link_vld;
- int pos = 0;
u8 pri_id;
u8 tc_id;
int ret;
@@ -1544,32 +1450,28 @@ static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4u %3u %2u\n",
- queue_id, qset_id, pri_id, tc_id);
+ seq_puts(s, "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ seq_printf(s, "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev))
continue;
- ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_nodes(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_nodes_cmd *nodes;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
@@ -1582,65 +1484,36 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
nodes = (struct hclge_tm_nodes_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
- pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
- nodes->pg_base_id, nodes->pg_num);
- pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
- nodes->pri_base_id, nodes->pri_num);
- pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
- le16_to_cpu(nodes->qset_base_id),
- le16_to_cpu(nodes->qset_num));
- pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
- le16_to_cpu(nodes->queue_base_id),
- le16_to_cpu(nodes->queue_num));
+ seq_puts(s, " BASE_ID MAX_NUM\n");
+ seq_printf(s, "PG %4u %4u\n", nodes->pg_base_id,
+ nodes->pg_num);
+ seq_printf(s, "PRI %4u %4u\n", nodes->pri_base_id,
+ nodes->pri_num);
+ seq_printf(s, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ seq_printf(s, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
return 0;
}
-static const struct hclge_dbg_item tm_pri_items[] = {
- { "ID", 4 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_pri(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
- char content[HCLGE_DBG_TM_INFO_LEN];
- u8 pri_num, sch_mode, weight, i, j;
- char *data_str;
- int pos, ret;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pri_num, sch_mode, weight, i;
+ const char *sch_mode_str;
+ int ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
- data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
- GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- NULL, ARRAY_SIZE(tm_pri_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
@@ -1666,59 +1539,31 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- (const char **)result,
- ARRAY_SIZE(tm_pri_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-6s%-6u", i, sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
out:
- kfree(data_str);
return ret;
}
-static const struct hclge_dbg_item tm_qset_items[] = {
- { "ID", 4 },
- { "MAP_PRI", 2 },
- { "LINK_VLD", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "IR_B", 2 },
- { "IR_U", 2 },
- { "IR_S", 2 },
- { "BS_B", 2 },
- { "BS_S", 2 },
- { "FLAG", 2 },
- { "RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_qset(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 priority, link_vld, sch_mode, weight;
struct hclge_tm_shaper_para shaper_para;
- char content[HCLGE_DBG_TM_INFO_LEN];
+ const char *sch_mode_str;
u16 qset_num, i;
- int ret, pos;
- u8 j;
+ int ret;
ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
- result[i] = &data_str[i][0];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- NULL, ARRAY_SIZE(tm_qset_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MAP_PRI LINK_VLD MODE DWRR IR_B IR_U IR_S ");
+ seq_puts(s, "BS_B BS_S FLAG RATE(Mbps)\n");
for (i = 0; i < qset_num; i++) {
ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
@@ -1740,29 +1585,22 @@ static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4u", priority);
- sprintf(result[j++], "%4u", link_vld);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- (const char **)result,
- ARRAY_SIZE(tm_qset_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-9u%-10u%-6s%-6u", i, priority, link_vld,
+ sch_mode_str, weight);
+ seq_printf(s, "%-6u%-6u%-6u%-6u%-6u%-6u%-14u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
}
return 0;
}
-static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pause_cfg(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -1775,23 +1613,21 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+ seq_printf(s, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap);
+ seq_printf(s, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
return 0;
}
#define HCLGE_DBG_TC_MASK 0x0F
-static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pri_map(struct seq_file *s, void *data)
{
#define HCLGE_DBG_TC_BIT_WIDTH 4
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
- int pos = 0;
u8 *pri_tc;
u8 tc, i;
int ret;
@@ -1806,33 +1642,33 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
- pri_map->vlan_pri);
- pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+ seq_printf(s, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ seq_puts(s, "PRI TC\n");
pri_tc = (u8 *)pri_map;
for (i = 0; i < HNAE3_MAX_TC; i++) {
tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
tc &= HCLGE_DBG_TC_MASK;
- pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ seq_printf(s, "%u %u\n", i, tc);
}
return 0;
}
-static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_dscp_map(struct seq_file *s, void *data)
{
- struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ struct hnae3_knic_private_info *kinfo;
u8 *req0 = (u8 *)desc[0].data;
u8 *req1 = (u8 *)desc[1].data;
u8 dscp_tc[HNAE3_MAX_DSCP];
- int pos, ret;
+ int ret;
u8 i, j;
- pos = scnprintf(buf, len, "tc map mode: %s\n",
- tc_map_mode_str[kinfo->tc_map_mode]);
+ kinfo = &hdev->vport[0].nic.kinfo;
+
+ seq_printf(s, "tc map mode: %s\n", tc_map_mode_str[kinfo->tc_map_mode]);
if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
return 0;
@@ -1847,7 +1683,7 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+ seq_puts(s, "\nDSCP PRIO TC\n");
/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
@@ -1865,18 +1701,17 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
continue;
- pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
- i, kinfo->dscp_prio[i], dscp_tc[i]);
+ seq_printf(s, " %2u %u %u\n", i, kinfo->dscp_prio[i],
+ dscp_tc[i]);
}
return 0;
}
-static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
@@ -1889,19 +1724,17 @@ static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ seq_printf(s, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
@@ -1912,26 +1745,24 @@ static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ seq_printf(s, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+ seq_printf(s, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_com_wl;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
@@ -1943,21 +1774,19 @@ static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
}
rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "\n");
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ seq_puts(s, "\n");
+ seq_printf(s, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
@@ -1969,20 +1798,18 @@ static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
}
rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
+ seq_printf(s, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
@@ -1997,28 +1824,25 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- return pos;
+ return 0;
}
static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
- char *buf, int len)
+ struct seq_file *s)
{
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
@@ -2031,86 +1855,75 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_buf_cfg(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, s);
if (ret < 0)
return ret;
return 0;
}
-static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mng_table(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_ethertype_idx_rd_cmd *req0;
struct hclge_desc desc;
u32 msg_egress_port;
- int pos = 0;
int ret, i;
- pos += scnprintf(buf + pos, len - pos,
- "entry mac_addr mask ether ");
- pos += scnprintf(buf + pos, len - pos,
- "mask vlan mask i_map i_dir e_type ");
- pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
+ seq_puts(s, "entry mac_addr mask ether ");
+ seq_puts(s, "mask vlan mask i_map i_dir e_type ");
+ seq_puts(s, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
true);
- req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
req0->index = cpu_to_le16(i);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
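
One behavioural detail worth noting in the hclge_dbg_dump_qos_buf_cfg() conversion above: the buffer-based sub-dumpers returned the number of characters written (callers advanced pos by the return value), whereas the seq_file versions return 0 on success or a negative errno that the composing show callback simply propagates. A minimal sketch of the composed form, with hypothetical foo_* names:

#include <linux/seq_file.h>

struct foo_dev;				/* hypothetical device context */

static int foo_dump_tx_buf(struct foo_dev *fdev, struct seq_file *s)
{
	seq_puts(s, "tx buffer config\n");	/* real code queries firmware */
	return 0;
}

static int foo_dump_rx_buf(struct foo_dev *fdev, struct seq_file *s)
{
	seq_puts(s, "rx buffer config\n");
	return 0;
}

static int foo_dump_buf_cfg(struct seq_file *s, void *data)
{
	struct foo_dev *fdev = s->private;
	int ret;

	ret = foo_dump_tx_buf(fdev, s);
	if (ret)
		return ret;

	return foo_dump_rx_buf(fdev, s);
}
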
@@ -2123,46 +1936,40 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
if (!req0->resp_code)
continue;
- pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
- le16_to_cpu(req0->index), req0->mac_addr);
+ seq_printf(s, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
- pos += scnprintf(buf + pos, len - pos,
- "%x %04x %x %04x ",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- le16_to_cpu(req0->ethter_type),
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- le16_to_cpu(req0->vlan_tag) &
- HCLGE_DBG_MNG_VLAN_TAG);
+ seq_printf(s, "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
- pos += scnprintf(buf + pos, len - pos,
- "%x %02x %02x ",
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
+ seq_printf(s, "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
msg_egress_port = le16_to_cpu(req0->egress_port);
- pos += scnprintf(buf + pos, len - pos,
- "%x %x %02x %04x %x\n",
- !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- msg_egress_port & HCLGE_DBG_MNG_PF_ID,
- (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- le16_to_cpu(req0->egress_queue),
- !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+ seq_printf(s, "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
return 0;
}
-#define HCLGE_DBG_TCAM_BUF_SIZE 256
-
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
- char *tcam_buf,
+ struct seq_file *s,
struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
- int pos = 0;
int ret, i;
__le32 *req;
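
Dropping HCLGE_DBG_TCAM_BUF_SIZE above removes the last fixed-size scratch buffer in this path: hclge_dbg_fd_tcam_read() used to format each TCAM key into a 256-byte buffer that the caller then copied into the output, while the seq_file version prints each word directly, with no truncation risk and no length bookkeeping. A before/after sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/seq_file.h>

struct foo_dev { u32 loc; u32 word; };		/* hypothetical */
static u32 foo_read_word(struct foo_dev *fdev) { return fdev->word; }

/* Before (sketch): format into a bounded scratch buffer, then copy. */
static int foo_read_entry_old(struct foo_dev *fdev, char *buf, int len)
{
	int pos = 0;

	pos += scnprintf(buf + pos, len - pos, "entry %u:\n", fdev->loc);
	pos += scnprintf(buf + pos, len - pos, "%08x\n", foo_read_word(fdev));
	return pos;			/* caller tracks remaining space */
}

/* After (sketch): print straight into the seq_file. */
static int foo_read_entry_new(struct foo_dev *fdev, struct seq_file *s)
{
	seq_printf(s, "entry %u:\n", fdev->loc);
	seq_printf(s, "%08x\n", foo_read_word(fdev));
	return 0;
}
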
@@ -2184,27 +1991,23 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
if (ret)
return ret;
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
- tcam_msg.loc);
+ seq_printf(s, "read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
return ret;
}
@@ -2228,14 +2031,13 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
return cnt;
}
-static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_tcam(struct seq_file *s, void *data)
{
- u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
- char *tcam_buf;
- int pos = 0;
+ u32 rule_num;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
@@ -2243,6 +2045,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
return -EOPNOTSUPP;
}
+ rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
if (!hdev->hclge_fd_rule_num || !rule_num)
return 0;
@@ -2250,12 +2053,6 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
if (!rule_locs)
return -ENOMEM;
- tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
- if (!tcam_buf) {
- kfree(rule_locs);
- return -ENOMEM;
- }
-
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
if (rule_cnt < 0) {
ret = rule_cnt;
@@ -2269,38 +2066,34 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
tcam_msg.stage = HCLGE_FD_STAGE_1;
tcam_msg.loc = rule_locs[i];
- ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, true, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
-
- ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, false, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
out:
- kfree(tcam_buf);
kfree(rule_locs);
return ret;
}
-static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_counter(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_fd_ad_cnt_read_cmd *req;
char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- int pos = 0;
int ret;
u64 cnt;
u8 i;
@@ -2308,8 +2101,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
- pos += scnprintf(buf + pos, len - pos,
- "func_id\thit_times\n");
+ seq_puts(s, "func_id\thit_times\n");
for (i = 0; i < func_num; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
@@ -2323,8 +2115,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
}
cnt = le64_to_cpu(req->cnt);
hclge_dbg_get_func_id_str(str_id, i);
- pos += scnprintf(buf + pos, len - pos,
- "%s\t%llu\n", str_id, cnt);
+ seq_printf(s, "%s\t%llu\n", str_id, cnt);
}
return 0;
@@ -2375,74 +2166,95 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
-static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_seq_dump_rst_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u32 i, offset;
+
+ seq_printf(s, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt);
+ seq_printf(s, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt);
+ seq_printf(s, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ seq_printf(s, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt);
+ seq_printf(s, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt);
+ seq_printf(s, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ seq_printf(s, "reset count: %u\n", hdev->rst_stats.reset_cnt);
+ seq_printf(s, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ seq_printf(s, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ seq_printf(s, "hdev state: 0x%lx\n", hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
unsigned long rem_nsec;
- int pos = 0;
u64 lc;
lc = local_clock();
rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
- (unsigned long)lc, rem_nsec / 1000);
- pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
- jiffies_to_msecs(jiffies - hdev->last_serv_processed));
- pos += scnprintf(buf + pos, len - pos,
- "last_service_task_processed: %lu(jiffies)\n",
- hdev->last_serv_processed);
- pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
- hdev->serv_processed_cnt);
+ seq_printf(s, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ seq_printf(s, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ seq_printf(s, "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ seq_printf(s, "last_service_task_cnt: %lu\n", hdev->serv_processed_cnt);
return 0;
}
-static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_interrupt(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
- pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
- hdev->num_nic_msi);
- pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
- hdev->num_roce_msi);
- pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
- hdev->num_msi_used);
- pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
- hdev->num_msi_left);
+ seq_printf(s, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ seq_printf(s, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
+ seq_printf(s, "num_msi_left: %u\n", hdev->num_msi_left);
return 0;
}
-static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
- char *buf, int len, u32 bd_num)
+static void hclge_dbg_imp_info_data_print(struct seq_file *s,
+ struct hclge_desc *desc_src,
+ u32 bd_num)
{
#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
struct hclge_desc *desc_index = desc_src;
u32 offset = 0;
- int pos = 0;
u32 i, j;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
for (i = 0; i < bd_num; i++) {
j = 0;
while (j < HCLGE_DESC_DATA_LEN - 1) {
- pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
- offset);
- pos += scnprintf(buf + pos, len - pos, "0x%08x ",
- le32_to_cpu(desc_index->data[j++]));
- pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
- le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%04x | ", offset);
+ seq_printf(s, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
}
desc_index++;
}
}
-static int
-hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_get_imp_stats_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_get_imp_bd_cmd *req;
struct hclge_desc *desc_src;
struct hclge_desc desc;
@@ -2479,7 +2291,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
+ hclge_dbg_imp_info_data_print(s, desc_src, bd_num);
kfree(desc_src);
@@ -2490,7 +2302,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -2502,9 +2314,8 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, len - *pos,
- "0x%04x | 0x%08x\n", offset,
- le32_to_cpu(desc[i].data[j]));
+ seq_printf(s, "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
offset += sizeof(u32);
*index -= sizeof(u32);
@@ -2515,19 +2326,18 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
}
}
-static int
-hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ncl_config(struct seq_file *s, void *data)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
- int pos = 0;
u32 data0;
int ret;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
while (index > 0) {
data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
@@ -2540,27 +2350,26 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
+ hclge_ncl_config_data_print(desc, &index, s);
}
return 0;
}
-static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_loopback(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
- int pos = 0;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
req_common = (struct hclge_common_lb_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
- hdev->hw.mac.mac_id);
+ seq_printf(s, "mac id: %u\n", hdev->hw.mac.mac_id);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2572,8 +2381,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
- pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- str_on_off(loopback_en));
+ seq_printf(s, "app loopback: %s\n", str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2584,24 +2392,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- str_on_off(loopback_en));
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ seq_printf(s, "serdes serial loopback: %s\n", str_on_off(loopback_en));
loopback_en = req_common->enable &
- HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
- pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- str_on_off(loopback_en));
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ seq_printf(s, "serdes parallel loopback: %s\n",
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- str_on_off(loopback_en));
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- str_on_off(loopback_en));
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
}
return 0;
@@ -2610,107 +2416,75 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
-static int
-hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_tnl_status(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
- int pos = 0;
- pos += scnprintf(buf + pos, len - pos,
- "Recently generated mac tnl interruption:\n");
+ seq_puts(s, "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos,
- "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+ seq_printf(s, "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
}
return 0;
}
-
-static const struct hclge_dbg_item mac_list_items[] = {
- { "FUNC_ID", 2 },
- { "MAC_ADDR", 12 },
- { "STATE", 2 },
-};
-
-static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
- bool is_unicast)
+static void hclge_dbg_dump_mac_list(struct seq_file *s, bool is_unicast)
{
- char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
- char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_node *mac_node, *tmp;
struct hclge_vport *vport;
struct list_head *list;
u32 func_id;
- int pos = 0;
- int i;
- for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
- is_unicast ? "UC" : "MC");
- hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
- NULL, ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%s MAC_LIST:\n", is_unicast ? "UC" : "MC");
+ seq_puts(s, "FUNC_ID MAC_ADDR STATE\n");
for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
vport = &hdev->vport[func_id];
list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_node, tmp, list, node) {
- i = 0;
- result[i++] = hclge_dbg_get_func_id_str(str_id,
- func_id);
- sprintf(result[i++], "%pM", mac_node->mac_addr);
- sprintf(result[i++], "%5s",
- hclge_mac_state_str[mac_node->state]);
- hclge_dbg_fill_content(content, sizeof(content),
- mac_list_items,
- (const char **)result,
- ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ if (func_id)
+ seq_printf(s, "vf%-7u", func_id - 1U);
+ else
+ seq_puts(s, "pf ");
+ seq_printf(s, "%pM ", mac_node->mac_addr);
+ seq_printf(s, "%5s\n",
+ hclge_mac_state_str[mac_node->state]);
}
spin_unlock_bh(&vport->mac_list_lock);
}
}
-static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_umv_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1;
struct hclge_vport *vport;
- int pos = 0;
u8 i;
- pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
- hdev->num_alloc_vport);
- pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
- hdev->max_umv_size);
- pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
- hdev->wanted_umv_size);
- pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
- hdev->priv_umv_size);
+ seq_printf(s, "num_alloc_vport : %u\n", hdev->num_alloc_vport);
+ seq_printf(s, "max_umv_size : %u\n", hdev->max_umv_size);
+ seq_printf(s, "wanted_umv_size : %u\n", hdev->wanted_umv_size);
+ seq_printf(s, "priv_umv_size : %u\n", hdev->priv_umv_size);
mutex_lock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
- hdev->share_umv_size);
+ seq_printf(s, "share_umv_size : %u\n", hdev->share_umv_size);
for (i = 0; i < func_num; i++) {
vport = &hdev->vport[i];
- pos += scnprintf(buf + pos, len - pos,
- "vport(%u) used_umv_num : %u\n",
- i, vport->used_umv_num);
+ seq_printf(s, "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
}
mutex_unlock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
- hdev->used_mc_mac_num);
+ seq_printf(s, "used_mc_mac_num : %u\n", hdev->used_mc_mac_num);
return 0;
}
@@ -2852,38 +2626,12 @@ static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
return 0;
}
-static const struct hclge_dbg_item vlan_filter_items[] = {
- { "FUNC_ID", 2 },
- { "I_VF_VLAN_FILTER", 2 },
- { "E_VF_VLAN_FILTER", 2 },
- { "PORT_VLAN_FILTER_BYPASS", 0 }
-};
-
-static const struct hclge_dbg_item vlan_offload_items[] = {
- { "FUNC_ID", 2 },
- { "PVID", 4 },
- { "ACCEPT_TAG1", 2 },
- { "ACCEPT_TAG2", 2 },
- { "ACCEPT_UNTAG1", 2 },
- { "ACCEPT_UNTAG2", 2 },
- { "INSERT_TAG1", 2 },
- { "INSERT_TAG2", 2 },
- { "SHIFT_TAG", 2 },
- { "STRIP_TAG1", 2 },
- { "STRIP_TAG2", 2 },
- { "DROP_TAG1", 2 },
- { "DROP_TAG2", 2 },
- { "PRI_ONLY_TAG1", 2 },
- { "PRI_ONLY_TAG2", 0 }
-};
-
-static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_filter_items)];
- u8 i, j, vlan_fe, bypass, ingress, egress;
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ u8 i, vlan_fe, bypass, ingress, egress;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
@@ -2893,14 +2641,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
- *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- str_on_off(ingress));
- *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- str_on_off(egress));
+ seq_printf(s, "I_PORT_VLAN_FILTER: %s\n", str_on_off(ingress));
+ seq_printf(s, "E_PORT_VLAN_FILTER: %s\n", str_on_off(egress));
- hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
- NULL, ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID I_VF_VLAN_FILTER E_VF_VLAN_FILTER ");
+ seq_puts(s, "PORT_VLAN_FILTER_BYPASS\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
@@ -2913,37 +2658,32 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
if (ret)
return ret;
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_on_off(ingress);
- result[j++] = str_on_off(egress);
- result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ?
- str_on_off(bypass) : "NA";
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_filter_items, result,
- ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ seq_printf(s, "%-9s%-18s%-18s%s\n",
+ hclge_dbg_get_func_id_str(str_id, i),
+ str_on_off(ingress), str_on_off(egress),
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA");
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
return 0;
}
-static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_offload_items)];
- char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_dbg_vlan_cfg vlan_cfg;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
- u8 i, j;
+ u8 i;
- hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
- NULL, ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID PVID ACCEPT_TAG1 ACCEPT_TAG2 ACCEPT_UNTAG1 ");
+ seq_puts(s, "ACCEPT_UNTAG2 INSERT_TAG1 INSERT_TAG2 SHIFT_TAG ");
+ seq_puts(s, "STRIP_TAG1 STRIP_TAG2 DROP_TAG1 DROP_TAG2 ");
+ seq_puts(s, "PRI_ONLY_TAG1 PRI_ONLY_TAG2\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
@@ -2954,106 +2694,92 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
if (ret)
return ret;
- sprintf(str_pvid, "%u", vlan_cfg.pvid);
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_pvid;
- result[j++] = str_on_off(vlan_cfg.accept_tag1);
- result[j++] = str_on_off(vlan_cfg.accept_tag2);
- result[j++] = str_on_off(vlan_cfg.accept_untag1);
- result[j++] = str_on_off(vlan_cfg.accept_untag2);
- result[j++] = str_on_off(vlan_cfg.insert_tag1);
- result[j++] = str_on_off(vlan_cfg.insert_tag2);
- result[j++] = str_on_off(vlan_cfg.shift_tag);
- result[j++] = str_on_off(vlan_cfg.strip_tag1);
- result[j++] = str_on_off(vlan_cfg.strip_tag2);
- result[j++] = str_on_off(vlan_cfg.drop_tag1);
- result[j++] = str_on_off(vlan_cfg.drop_tag2);
- result[j++] = str_on_off(vlan_cfg.pri_only1);
- result[j++] = str_on_off(vlan_cfg.pri_only2);
-
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_offload_items, result,
- ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_printf(s, "%-9s", hclge_dbg_get_func_id_str(str_id, i));
+ seq_printf(s, "%-6u", vlan_cfg.pvid);
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.accept_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.accept_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag1));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag2));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag1));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.shift_tag));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag1));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.pri_only1));
+ seq_printf(s, "%s\n", str_on_off(vlan_cfg.pri_only2));
}
return 0;
}
-static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_vlan_config(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+ return hclge_dbg_dump_vlan_offload_config(hdev, s);
}
-static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ptp_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ptp *ptp = hdev->ptp;
u32 sw_cfg = ptp->ptp_cfg;
unsigned int tx_start;
unsigned int last_rx;
- int pos = 0;
u32 hw_cfg;
int ret;
- pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
- ptp->info.name);
- pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
- pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN,
- &ptp->flags)));
- pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN,
- &ptp->flags)));
+ seq_printf(s, "phc %s's debug info:\n", ptp->info.name);
+ seq_printf(s, "ptp enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
+ seq_printf(s, "ptp tx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags)));
+ seq_printf(s, "ptp rx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
- pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
- last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+ seq_printf(s, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ seq_printf(s, "rx count: %lu\n", ptp->rx_cnt);
tx_start = jiffies_to_msecs(ptp->tx_start);
- pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
- tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
- pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
- ptp->tx_skipped);
- pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
- ptp->tx_timeout);
- pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
- ptp->last_tx_seqid);
+ seq_printf(s, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ seq_printf(s, "tx count: %lu\n", ptp->tx_cnt);
+ seq_printf(s, "tx skipped count: %lu\n", ptp->tx_skipped);
+ seq_printf(s, "tx timeout count: %lu\n", ptp->tx_timeout);
+ seq_printf(s, "last tx seqid: %u\n", ptp->last_tx_seqid);
+
ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
- sw_cfg, hw_cfg);
+ seq_printf(s, "sw_cfg: %#x, hw_cfg: %#x\n", sw_cfg, hw_cfg);
- pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
- ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+ seq_printf(s, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
-static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_uc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, true);
+ hclge_dbg_dump_mac_list(s, true);
return 0;
}
-static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_mc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, false);
+ hclge_dbg_dump_mac_list(s, false);
return 0;
}
@@ -3061,156 +2787,156 @@ static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
{
.cmd = HNAE3_DBG_CMD_TM_NODES,
- .dbg_dump = hclge_dbg_dump_tm_nodes,
+ .dbg_read_func = hclge_dbg_dump_tm_nodes,
},
{
.cmd = HNAE3_DBG_CMD_TM_PRI,
- .dbg_dump = hclge_dbg_dump_tm_pri,
+ .dbg_read_func = hclge_dbg_dump_tm_pri,
},
{
.cmd = HNAE3_DBG_CMD_TM_QSET,
- .dbg_dump = hclge_dbg_dump_tm_qset,
+ .dbg_read_func = hclge_dbg_dump_tm_qset,
},
{
.cmd = HNAE3_DBG_CMD_TM_MAP,
- .dbg_dump = hclge_dbg_dump_tm_map,
+ .dbg_read_func = hclge_dbg_dump_tm_map,
},
{
.cmd = HNAE3_DBG_CMD_TM_PG,
- .dbg_dump = hclge_dbg_dump_tm_pg,
+ .dbg_read_func = hclge_dbg_dump_tm_pg,
},
{
.cmd = HNAE3_DBG_CMD_TM_PORT,
- .dbg_dump = hclge_dbg_dump_tm_port,
+ .dbg_read_func = hclge_dbg_dump_tm_port,
},
{
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
- .dbg_dump = hclge_dbg_dump_tc,
+ .dbg_read_func = hclge_dbg_dump_tc,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
- .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_pause_cfg,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
- .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ .dbg_read_func = hclge_dbg_dump_qos_pri_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
- .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ .dbg_read_func = hclge_dbg_dump_qos_dscp_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
- .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_buf_cfg,
},
{
.cmd = HNAE3_DBG_CMD_MAC_UC,
- .dbg_dump = hclge_dbg_dump_mac_uc,
+ .dbg_read_func = hclge_dbg_dump_mac_uc,
},
{
.cmd = HNAE3_DBG_CMD_MAC_MC,
- .dbg_dump = hclge_dbg_dump_mac_mc,
+ .dbg_read_func = hclge_dbg_dump_mac_mc,
},
{
.cmd = HNAE3_DBG_CMD_MNG_TBL,
- .dbg_dump = hclge_dbg_dump_mng_table,
+ .dbg_read_func = hclge_dbg_dump_mng_table,
},
{
.cmd = HNAE3_DBG_CMD_LOOPBACK,
- .dbg_dump = hclge_dbg_dump_loopback,
+ .dbg_read_func = hclge_dbg_dump_loopback,
},
{
.cmd = HNAE3_DBG_CMD_PTP_INFO,
- .dbg_dump = hclge_dbg_dump_ptp_info,
+ .dbg_read_func = hclge_dbg_dump_ptp_info,
},
{
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
- .dbg_dump = hclge_dbg_dump_interrupt,
+ .dbg_read_func = hclge_dbg_dump_interrupt,
},
{
.cmd = HNAE3_DBG_CMD_RESET_INFO,
- .dbg_dump = hclge_dbg_dump_rst_info,
+ .dbg_read_func = hclge_dbg_seq_dump_rst_info,
},
{
.cmd = HNAE3_DBG_CMD_IMP_INFO,
- .dbg_dump = hclge_dbg_get_imp_stats_info,
+ .dbg_read_func = hclge_dbg_get_imp_stats_info,
},
{
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
- .dbg_dump = hclge_dbg_dump_ncl_config,
+ .dbg_read_func = hclge_dbg_dump_ncl_config,
},
{
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_bios_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_SSU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ssu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_igu_egu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RPU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rpu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_NCSI,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ncsi_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RTC,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rtc_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_PPP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ppp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RCB,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rcb_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_TQP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_tqp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_MAC,
- .dbg_dump = hclge_dbg_dump_mac,
+ .dbg_read_func = hclge_dbg_dump_mac,
},
{
.cmd = HNAE3_DBG_CMD_REG_DCB,
- .dbg_dump = hclge_dbg_dump_dcb,
+ .dbg_read_func = hclge_dbg_dump_dcb,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
- .dbg_dump = hclge_dbg_dump_fd_tcam,
+ .dbg_read_func = hclge_dbg_dump_fd_tcam,
},
{
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
- .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ .dbg_read_func = hclge_dbg_dump_mac_tnl_status,
},
{
.cmd = HNAE3_DBG_CMD_SERV_INFO,
- .dbg_dump = hclge_dbg_dump_serv_info,
+ .dbg_read_func = hclge_dbg_dump_serv_info,
},
{
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
- .dbg_dump = hclge_dbg_dump_vlan_config,
+ .dbg_read_func = hclge_dbg_dump_vlan_config,
},
{
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
- .dbg_dump = hclge_dbg_dump_fd_counter,
+ .dbg_read_func = hclge_dbg_dump_fd_counter,
},
{
.cmd = HNAE3_DBG_CMD_UMV_INFO,
- .dbg_dump = hclge_dbg_dump_umv_info,
+ .dbg_read_func = hclge_dbg_dump_umv_info,
},
};
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len)
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func)
{
struct hclge_vport *vport = hclge_get_vport(handle);
const struct hclge_dbg_func *cmd_func;
@@ -3220,11 +2946,8 @@ int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
if (cmd == hclge_dbg_cmd_func[i].cmd) {
cmd_func = &hclge_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(hdev, buf, len);
- else
- return cmd_func->dbg_dump_reg(hdev, cmd, buf,
- len);
+ *func = cmd_func->dbg_read_func;
+ return 0;
}
}
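
The dispatch table above now returns a seq_file read callback (read_func) instead of filling a caller-supplied buffer. As a hedged sketch of the underlying kernel pattern (not code from this patch -- example_show, example_dev and the debugfs wiring below are invented for illustration), a seq_file-backed debugfs entry is typically set up like this:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	struct example_dev {
		u32 counter;
	};

	/* "show" callback: emit text with seq_printf() and return 0 on success */
	static int example_show(struct seq_file *s, void *data)
	{
		struct example_dev *edev = s->private;	/* i_private passed at open time */

		seq_printf(s, "counter: %u\n", edev->counter);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(example);	/* generates example_fops via single_open() */

	/* at init time:
	 *	debugfs_create_file("example", 0400, parent_dentry, edev, &example_fops);
	 */

The hclge callbacks follow the same shape: each receives the seq_file, recovers the device via hclge_seq_file_to_hdev(s), and prints with seq_printf()/seq_puts(), which is why the scnprintf() length bookkeeping could be dropped throughout.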
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 2b998cbed826..317f79efd54c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -92,6 +92,7 @@ struct hclge_dbg_func {
int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
char *buf, int len);
+ read_func dbg_read_func;
};
struct hclge_dbg_status_dfx_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a7de67699a01..f209a05e2033 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -490,7 +490,7 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
@@ -582,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -599,7 +599,7 @@ static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 **data)
{
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
return;
@@ -2358,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2624,7 +2624,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lan
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
@@ -2652,7 +2652,7 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
if (ret)
return ret;
- hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_speed = (u32)speed;
hdev->hw.mac.req_duplex = duplex;
return 0;
@@ -3446,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -4872,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4890,7 +4890,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -6989,7 +6989,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
@@ -8223,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
@@ -9292,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -9576,33 +9576,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -10623,16 +10626,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
@@ -10713,7 +10719,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10724,7 +10730,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
@@ -11214,7 +11220,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -11258,7 +11264,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -11423,7 +11429,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -12088,7 +12094,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -12113,7 +12119,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -12864,7 +12870,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_read_cmd = hclge_dbg_read_cmd,
+ .dbg_get_read_func = hclge_dbg_get_read_func,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -12904,7 +12910,7 @@ static struct hnae3_ae_algo ae_algo = {
static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b9fc719880bb..032b472d2368 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1142,8 +1142,8 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 59c863306657..c7ff12a6c076 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -749,16 +749,17 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
+ int rss_hash_key_size;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
+ rss_hash_key_size = sizeof(rss_cfg->rss_hash_key);
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
- if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(rss_cfg->rss_hash_key)) {
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > rss_hash_key_size) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -800,7 +801,7 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+ int tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->hw.cmq.crq.next_to_use;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9a456ebf9b7c..96553109f44c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -151,7 +151,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mdio_bus->parent = &hdev->pdev->dev;
mdio_bus->priv = hdev;
- mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->phy_mask = ~(1U << mac->phy_addr);
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index ec581d4b696f..4bd52eab3914 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -497,14 +497,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts mode, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ktime_get_real_ts64(&ts);
@@ -512,7 +512,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts time, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
@@ -520,6 +520,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
return 0;
+out_clear_int:
+ clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ hclge_ptp_int_en(hdev, false);
out:
hclge_ptp_destroy_clock(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 63483636c074..61faddcc3dd0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -25,7 +25,7 @@ struct ifreq;
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
-#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_MASK 0x3fffffffLL
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index c4f35e8e2177..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -606,7 +606,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
@@ -624,7 +624,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 tuple_sets;
@@ -2465,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.rst_cnt;
+ u32 rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2625,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
- dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
goto err_disable_device;
}
@@ -3094,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *nic = &hdev->nic;
- struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
- return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->tc_info.num_tc);
+ return min(hdev->rss_size_max, hdev->num_tqps);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 85c2a634c8f9..f5c99ca54369 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -159,7 +159,7 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->hw.cmq.crq.next_to_use;
+ return tail == (u32)hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 7d9d9dbc7560..9de01e344e27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -127,37 +127,38 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_queue *tqp;
- int i, j, reg_um;
+ int i, j, reg_num;
u32 *reg = data;
*version = hdev->fw_version;
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(common_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(ring_reg_addr_list);
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
tqp = &hdev->htqp[j].q;
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = readl_relaxed(tqp->io_base -
HCLGEVF_TQP_REG_OFFSET +
ring_reg_addr_list[i]);
}
- reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR,
+ reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
HCLGEVF_RING_INT_REG_OFFSET * j);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index c559dd4291d3..e9f338e9dbe7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -919,9 +919,10 @@ static int hinic_set_channels(struct net_device *netdev,
return 0;
}
-static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rss_type rss_type = { 0 };
int err;
@@ -964,7 +965,7 @@ static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
return 0;
}
-static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+static int set_l4_rss_hash_ops(const struct ethtool_rxfh_fields *cmd,
struct hinic_rss_type *rss_type)
{
u8 rss_l4_en = 0;
@@ -1000,16 +1001,18 @@ static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
return 0;
}
-static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ struct hinic_dev *nic_dev = netdev_priv(dev);
+ struct hinic_rss_type *rss_type;
int err;
- if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
- cmd->data = 0;
+ rss_type = &nic_dev->rss_type;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- }
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1108,26 +1111,6 @@ static int hinic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nic_dev->num_qps;
break;
- case ETHTOOL_GRXFH:
- err = hinic_get_rss_hash_opts(nic_dev, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- err = hinic_set_rss_hash_opts(nic_dev, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1797,11 +1780,12 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
@@ -1829,11 +1813,12 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 045c47786a04..28114a59347e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -605,7 +605,7 @@ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
/**
* ceq_elements_init - Initialize all the elements in the ceq
* @eq: the event queue
- * @init_val: value to init with it the elements
+ * @init_val: value to init the elements with
**/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 3f9c31d29215..97c1584dc05b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -861,7 +861,7 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
HINIC_MBOX_HEADER_SET(cmd, CMD) |
- /* The vf's offset to it's associated pf */
+ /* The vf's offset to its associated pf */
HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index ae08257dd1d2..3f7f73430be4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -482,7 +482,6 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
{
struct hinic3_sq_wqe_combo wqe_combo = {};
struct hinic3_tx_info *tx_info;
- struct hinic3_txq *tx_q = txq;
u32 offload, queue_info = 0;
struct hinic3_sq_task task;
u16 wqebb_cnt, num_sge;
@@ -506,9 +505,9 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
if (likely(wqebb_cnt > txq->tx_stop_thrs))
txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
- netif_subqueue_try_stop(netdev, tx_q->sq->q_id,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_start_thrs);
+ netif_subqueue_try_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
return NETDEV_TX_BUSY;
}
@@ -542,12 +541,11 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
goto err_drop_pkt;
}
- netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id),
- skb->len);
- netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_stop_thrs,
- tx_q->tx_start_thrs);
+ netif_subqueue_sent(netdev, txq->sq->q_id, skb->len);
+ netif_subqueue_maybe_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_stop_thrs,
+ txq->tx_start_thrs);
hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
@@ -631,7 +629,6 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
struct net_device *netdev = txq->netdev;
u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
struct hinic3_tx_info *tx_info;
- struct hinic3_txq *tx_q = txq;
unsigned int bytes_compl = 0;
unsigned int pkts = 0;
u16 wqebb_cnt = 0;
@@ -663,8 +660,8 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_start_thrs);
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
return pkts == HINIC3_TX_POLL_WEIGHT;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 24046fe16634..6f0821f1e798 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -211,98 +211,169 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
struct ibmveth_buff_pool *pool)
{
- u32 i;
- u32 count = pool->size - atomic_read(&pool->available);
- u32 buffers_added = 0;
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
+ union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
+ u32 remaining = pool->size - atomic_read(&pool->available);
+ u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
unsigned long lpar_rc;
+ u32 buffers_added = 0;
+ u32 i, filled, batch;
+ struct vio_dev *vdev;
dma_addr_t dma_addr;
+ struct device *dev;
+ u32 index;
+
+ vdev = adapter->vdev;
+ dev = &vdev->dev;
mb();
- for (i = 0; i < count; ++i) {
- union ibmveth_buf_desc desc;
+ batch = adapter->rx_buffers_per_hcall;
- free_index = pool->consumer_index;
- index = pool->free_map[free_index];
- skb = NULL;
+ while (remaining > 0) {
+ unsigned int free_index = pool->consumer_index;
- if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
- schedule_work(&adapter->work);
- goto bad_index_failure;
- }
+ /* Fill a batch of descriptors */
+ for (filled = 0; filled < min(remaining, batch); filled++) {
+ index = pool->free_map[free_index];
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ adapter->replenish_add_buff_failure++;
+ netdev_info(adapter->netdev,
+ "Invalid map index %u, reset\n",
+ index);
+ schedule_work(&adapter->work);
+ break;
+ }
+
+ if (!pool->skbuff[index]) {
+ struct sk_buff *skb = NULL;
- /* are we allocating a new buffer or recycling an old one */
- if (pool->skbuff[index])
- goto reuse;
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ adapter->replenish_no_mem++;
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
+
+ dma_addr = dma_map_single(dev, skb->data,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+ } else {
+ /* re-use case */
+ dma_addr = pool->dma_addr[index];
+ }
- if (!skb) {
- netdev_dbg(adapter->netdev,
- "replenish: unable to allocate skb\n");
- adapter->replenish_no_mem++;
- break;
- }
+ if (rx_flush) {
+ unsigned int len;
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- pool->buff_size, DMA_FROM_DEVICE);
+ len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
+ len = min(pool->buff_size, len);
+ ibmveth_flush_buffer(pool->skbuff[index]->data,
+ len);
+ }
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto failure;
+ descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
+ pool->buff_size;
+ descs[filled].fields.address = dma_addr;
- pool->dma_addr[index] = dma_addr;
- pool->skbuff[index] = skb;
+ correlators[filled] = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlators[filled];
- if (rx_flush) {
- unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
- ibmveth_flush_buffer(skb->data, len);
+ free_index++;
+ if (free_index >= pool->size)
+ free_index = 0;
}
-reuse:
- dma_addr = pool->dma_addr[index];
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)pool->skbuff[index]->data = correlator;
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
- desc.desc);
+ if (!filled)
+ break;
+ /* single buffer case */
+ if (filled == 1)
+ lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
+ descs[0].desc);
+ else
+ /* Multi-buffer hcall */
+ lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
+ descs[0].desc,
+ descs[1].desc,
+ descs[2].desc,
+ descs[3].desc,
+ descs[4].desc,
+ descs[5].desc,
+ descs[6].desc,
+ descs[7].desc);
if (lpar_rc != H_SUCCESS) {
- netdev_warn(adapter->netdev,
- "%sadd_logical_lan failed %lu\n",
- skb ? "" : "When recycling: ", lpar_rc);
- goto failure;
+ dev_warn_ratelimited(dev,
+ "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
+ filled, lpar_rc, batch);
+ goto hcall_failure;
}
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
+ /* Only update pool state after hcall succeeds */
+ for (i = 0; i < filled; i++) {
+ free_index = pool->consumer_index;
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- buffers_added++;
- adapter->replenish_add_buff_success++;
- }
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ }
- mb();
- atomic_add(buffers_added, &(pool->available));
- return;
+ buffers_added += filled;
+ adapter->replenish_add_buff_success += filled;
+ remaining -= filled;
-failure:
+ memset(&descs, 0, sizeof(descs));
+ memset(&correlators, 0, sizeof(correlators));
+ continue;
- if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(pool->skbuff[index]);
- pool->skbuff[index] = NULL;
-bad_index_failure:
- adapter->replenish_add_buff_failure++;
+hcall_failure:
+ for (i = 0; i < filled; i++) {
+ index = correlators[i] & 0xffffffffUL;
+ dma_addr = pool->dma_addr[index];
+
+ if (pool->skbuff[index]) {
+ if (dma_addr &&
+ !dma_mapping_error(dev, dma_addr))
+ dma_unmap_single(dev, dma_addr,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
+ }
+ }
+ adapter->replenish_add_buff_failure += filled;
+
+ /*
+ * If the multi rx buffers hcall is no longer supported by FW,
+ * e.g. in the case of Live Partition Migration
+ */
+ if (batch > 1 && lpar_rc == H_FUNCTION) {
+ /*
+ * Instead of retrying each buffer individually here,
+ * just set the max rx buffers per hcall to 1; the
+ * buffers will be replenished the next time
+ * ibmveth_replenish_buffer_pool() is called again,
+ * using the single-buffer case.
+ */
+ netdev_info(adapter->netdev,
+ "RX Multi buffers not supported by FW, rc=%lu\n",
+ lpar_rc);
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_info(adapter->netdev,
+ "Next rx replesh will fall back to single-buffer hcall\n");
+ }
+ break;
+ }
mb();
atomic_add(buffers_added, &(pool->available));
@@ -1783,6 +1854,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->features |= NETIF_F_FRAGLIST;
}
+ if (ret == H_SUCCESS &&
+ (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
+ adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
+ netdev_dbg(netdev,
+ "RX Multi-buffer hcall supported by FW, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ } else {
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_dbg(netdev,
+ "RX Single-buffer hcall mode, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index b0a2460ec9f9..068f99df133e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -28,6 +28,7 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT 0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
@@ -46,6 +47,24 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+static inline long h_add_logical_lan_buffers(unsigned long unit_address,
+ unsigned long desc1,
+ unsigned long desc2,
+ unsigned long desc3,
+ unsigned long desc4,
+ unsigned long desc5,
+ unsigned long desc6,
+ unsigned long desc7,
+ unsigned long desc8)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
+ retbuf, unit_address,
+ desc1, desc2, desc3, desc4,
+ desc5, desc6, desc7, desc8);
+}
+
/* FW allows us to send 6 descriptors but we only use one so mark
* the other 5 as unused (0)
*/
@@ -101,6 +120,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
+#define IBMVETH_MAX_RX_PER_HCALL 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -151,6 +171,7 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
+ unsigned int rx_buffers_per_hcall;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 92647e137cf8..eec971567aac 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2312,8 +2312,6 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_pool->num_buffers - 1 :
tx_pool->consumer_index - 1;
tx_buff = &tx_pool->tx_buff[index];
- adapter->netdev->stats.tx_packets--;
- adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
@@ -2647,9 +2645,6 @@ tx_err:
}
out:
rcu_read_unlock();
- netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
@@ -3452,6 +3447,25 @@ err:
return -ret;
}
+static void ibmvnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ stats->rx_packets += adapter->rx_stats_buffers[i].packets;
+ stats->rx_bytes += adapter->rx_stats_buffers[i].bytes;
+ }
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ stats->tx_packets += adapter->tx_stats_buffers[i].batched_packets;
+ stats->tx_packets += adapter->tx_stats_buffers[i].direct_packets;
+ stats->tx_bytes += adapter->tx_stats_buffers[i].bytes;
+ stats->tx_dropped += adapter->tx_stats_buffers[i].dropped_packets;
+ }
+}
+
static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -3567,8 +3581,6 @@ restart_poll:
length = skb->len;
napi_gro_receive(napi, skb); /* send it up */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
adapter->rx_stats_buffers[scrq_num].packets++;
adapter->rx_stats_buffers[scrq_num].bytes += length;
frames_processed++;
@@ -3678,6 +3690,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_rx_mode = ibmvnic_set_multi,
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = ibmvnic_get_stats64,
.ndo_tx_timeout = ibmvnic_tx_timeout,
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index a189038d88df..246ddce753f9 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -211,7 +211,6 @@ struct ibmvnic_statistics {
u8 reserved[72];
} __packed __aligned(8);
-#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
u64 batched_packets;
u64 direct_packets;
@@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats {
u64 dropped_packets;
};
-#define NUM_RX_STATS 3
+#define NUM_TX_STATS \
+ (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
+
struct ibmvnic_rx_queue_stats {
u64 packets;
u64 bytes;
u64 interrupts;
};
+#define NUM_RX_STATS \
+ (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
+
struct ibmvnic_acl_buffer {
__be32 len;
__be32 version;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5a331c1c76cb..b05cc0d7a15d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -231,6 +231,7 @@ config I40E
depends on PCI
select AUXILIARY_BUS
select LIBIE
+ select LIBIE_ADMINQ
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -260,6 +261,7 @@ config I40E_DCB
config IAVF
tristate
select LIBIE
+ select LIBIE_ADMINQ
select NET_SHAPER
config I40EVF
@@ -294,6 +296,7 @@ config ICE
select AUXILIARY_BUS
select DIMLIB
select LIBIE
+ select LIBIE_ADMINQ
select NET_DEVLINK
select PACKING
select PLDMFW
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 8294a7c4f122..ba331899d186 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -638,6 +638,9 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
+/* Uninitialized ("empty") checksum word value */
+#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF
+
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9364bc2b4eb1..c0bbb12eed2e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2096,54 +2096,47 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
}
}
-static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info,
- u32 __always_unused *rule_locs)
+static int e1000_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
- info->data = 0;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc;
- switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc;
+ info->data = 0;
- mrqc = er32(MRQC);
+ mrqc = er32(MRQC);
- if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
- return 0;
-
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V6_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
- }
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ return 0;
}
static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
@@ -2352,7 +2345,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
- .get_rxnfc = e1000_get_rxnfc,
+ .get_rxfh_fields = e1000_get_rxfh_fields,
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 364378133526..df4e7d781cb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
+ } else if (hw->mac.type == e1000_pch_tgp) {
+ return 0;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7719e15813ee..b27a61fab371 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4436,7 +4436,7 @@ u64 e1000e_read_systim(struct e1000_adapter *adapter,
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+static u64 e1000e_cyclecounter_read(struct cyclecounter *cc)
{
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index e609f4df86f4..16369e6d245a 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
checksum += nvm_data;
}
+ if (hw->mac.type == e1000_pch_tgp &&
+ nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
+ e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
+ return 0;
+ }
+
if (checksum != (u16)NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 6119a4108838..65a2816142d9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -189,13 +189,14 @@ struct fm10k_q_vector {
struct fm10k_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
char name[IFNAMSIZ + 9];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
- struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1bc5b6c0b897..1954a04460d1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -691,9 +691,11 @@ static int fm10k_set_coalesce(struct net_device *dev,
return 0;
}
-static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *cmd)
+static int fm10k_get_rssh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on fm10k */
@@ -743,9 +745,6 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
cmd->data = interface->num_rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = fm10k_get_rss_hash_opts(interface, cmd);
- break;
default:
break;
}
@@ -753,9 +752,11 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *nfc)
+static int fm10k_set_rssh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
interface->flags);
int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
@@ -871,22 +872,6 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fm10k_set_rss_hash_opt(interface, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
struct fm10k_hw *hw = &interface->hw;
@@ -1176,7 +1161,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
.get_rxnfc = fm10k_get_rxnfc,
- .set_rxnfc = fm10k_set_rxnfc,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
@@ -1186,6 +1170,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
.set_rxfh = fm10k_set_rssh,
+ .get_rxfh_fields = fm10k_get_rssh_fields,
+ .set_rxfh_fields = fm10k_set_rssh_fields,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c67963bfe14e..49aa4497efce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -548,6 +548,7 @@ struct i40e_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
u16 sw_int_count; /* SW interrupt count */
+ u32 link_down_events;
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -660,7 +661,7 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
struct work_struct ptp_extts0_work;
ktime_t ptp_reset_start;
@@ -945,6 +946,7 @@ struct i40e_q_vector {
u16 reg_idx; /* register index of the interrupt */
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
struct i40e_ring_container rx;
struct i40e_ring_container tx;
@@ -955,7 +957,6 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
bool in_busy_poll;
@@ -1302,8 +1303,11 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void i40e_ptp_save_hw_time(struct i40e_pf *pf);
void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 175c1320c143..096ec46bb619 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -18,7 +18,7 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -44,7 +44,7 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
(hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -80,7 +80,7 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
**/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code;
int i;
@@ -108,9 +108,9 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -119,12 +119,12 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -691,8 +691,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
@@ -750,7 +750,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
**/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -758,7 +758,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
{
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
int status = 0;
@@ -771,7 +771,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
@@ -851,9 +851,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -905,13 +905,13 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = -EBUSY;
else
status = -EIO;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
@@ -954,7 +954,7 @@ asq_send_command_error:
**/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -972,7 +972,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
}
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -996,12 +996,12 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
**/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
int status;
@@ -1023,13 +1023,13 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
*
* Fill the desc with default values
**/
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1047,7 +1047,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1081,9 +1081,9 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -1107,14 +1107,14 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, I40E_PF_ARQT, ntc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 55b5bb884d73..1be97a3a86ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -9,7 +9,7 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct i40e_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct i40e_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -119,7 +119,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c8f35d4de271..76d872b91a38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
#include <linux/bits.h>
#include <linux/types.h>
@@ -30,75 +32,6 @@
/* API version 1.10 for X722 devices adds ability to request FEC encoding */
#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
@@ -320,21 +253,6 @@ struct i40e_aqc_get_version {
__le16 api_minor;
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
__le32 driver_unloading;
@@ -352,75 +270,6 @@ struct i40e_aqc_set_pf_context {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
- u8 command_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_ISCSI 0x0022
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
-
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
__le16 command_flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 59263551c383..5f1a405cbbf8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -682,9 +682,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
if (err) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -711,8 +709,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
dev_info(&pf->pdev->dev,
"update VSI ctxt for PE failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index b11c35e307ca..270e7e8cf9cf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -69,66 +69,6 @@ int i40e_set_mac_type(struct i40e_hw *hw)
}
/**
- * i40e_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case I40E_AQ_RC_OK:
- return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -141,7 +81,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u32 effective_mask = hw->debug_mask & mask;
char prefix[27];
u16 len;
@@ -164,12 +104,12 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && buf_len != 0 && len != 0 &&
(effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
@@ -214,14 +154,14 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ struct i40e_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
+ cmd = libie_aq_raw(&desc);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
@@ -245,9 +185,8 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
int status;
u16 flags;
@@ -258,9 +197,10 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_lut);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -326,10 +266,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_key_data *key,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ struct i40e_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (set)
@@ -339,9 +278,10 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_key);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -439,13 +379,13 @@ i40e_aq_mac_address_read(struct i40e_hw *hw,
struct i40e_aqc_mac_address_read_data *addrs,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_read *cmd_data =
- (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ struct i40e_aqc_mac_address_read *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+ cmd_data = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, addrs,
sizeof(*addrs), cmd_details);
@@ -465,13 +405,13 @@ int i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_write *cmd_data =
- (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ struct i40e_aqc_mac_address_write *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
+ cmd_data = libie_aq_raw(&desc);
cmd_data->command_flags = cpu_to_le16(flags);
cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
@@ -1061,7 +1001,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!abilities)
@@ -1071,36 +1011,36 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_abilities);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
if (qualified_modules)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
if (report_init)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
switch (hw->aq.asq_last_status) {
- case I40E_AQ_RC_EIO:
+ case LIBIE_AQ_RC_EIO:
status = -EIO;
break;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = -EIO;
break;
- /* also covers I40E_AQ_RC_OK */
+ /* also covers LIBIE_AQ_RC_OK */
default:
break;
}
- } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ } while ((hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN) &&
(total_delay < max_delay));
if (status)
@@ -1137,9 +1077,8 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_aq_set_phy_config *config,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aq_set_phy_config *cmd =
- (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ struct i40e_aq_set_phy_config *cmd;
+ struct libie_aq_desc desc;
int status;
if (!config)
@@ -1148,6 +1087,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_config);
+ cmd = libie_aq_raw(&desc);
*cmd = *config;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1259,14 +1199,14 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_clear_pxe *cmd =
- (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ struct i40e_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
+ cmd = libie_aq_raw(&desc);
cmd->rx_cnt = 0x2;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1288,14 +1228,14 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool enable_link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_link_restart_an *cmd =
- (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ struct i40e_aqc_set_link_restart_an *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_PHY_RESTART_AN;
if (enable_link)
cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
@@ -1320,16 +1260,16 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_link_status *resp =
- (struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_aqc_get_link_status *resp;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
u16 command_flags;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ resp = libie_aq_raw(&desc);
if (enable_lse)
command_flags = I40E_AQ_LSE_ENABLE;
else
@@ -1415,14 +1355,14 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
u16 mask,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_int_mask *cmd =
- (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ struct i40e_aqc_set_phy_int_mask *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
+ cmd = libie_aq_raw(&desc);
cmd->event_mask = cpu_to_le16(mask);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1441,11 +1381,11 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_lb_mode *cmd =
- (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ struct i40e_aqc_set_lb_mode *cmd;
+ struct libie_aq_desc desc;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+ cmd = libie_aq_raw(&desc);
if (ena_lpbk) {
if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
@@ -1467,14 +1407,14 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_debug *cmd =
- (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ struct i40e_aqc_set_phy_debug *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
+ cmd = libie_aq_raw(&desc);
cmd->command_flags = cmd_flags;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1494,23 +1434,22 @@ int i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
cmd->connection_type = vsi_ctx->connection_type;
cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1538,15 +1477,14 @@ int i40e_aq_set_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1566,15 +1504,14 @@ int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(0);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1597,15 +1534,15 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1636,15 +1573,15 @@ int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1671,15 +1608,15 @@ int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1707,15 +1644,15 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1748,9 +1685,8 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
@@ -1760,6 +1696,7 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
cmd->seid = cpu_to_le16(seid);
@@ -1783,14 +1720,14 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 seid, bool set_filter,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set_filter)
cmd->promiscuous_flags
|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
@@ -1815,20 +1752,19 @@ int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -1857,19 +1793,18 @@ int i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1896,16 +1831,16 @@ int i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *scfg =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ scfg = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
scfg->seid = cpu_to_le16(*start_seid);
status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
@@ -1930,13 +1865,13 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_switch_config *scfg =
- (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ struct i40e_aqc_set_switch_config *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
+ scfg = libie_aq_raw(&desc);
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
@@ -1968,11 +1903,11 @@ int i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_version *resp =
- (struct i40e_aqc_get_version *)&desc.params.raw;
+ struct i40e_aqc_get_version *resp;
+ struct libie_aq_desc desc;
int status;
+ resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2005,22 +1940,22 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_driver_version *cmd =
- (struct i40e_aqc_driver_version *)&desc.params.raw;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
int status;
u16 len;
if (dv == NULL)
return -EINVAL;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
- cmd->driver_major_ver = dv->major_version;
- cmd->driver_minor_ver = dv->minor_version;
- cmd->driver_build_ver = dv->build_version;
- cmd->driver_subbuild_ver = dv->subbuild_version;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_version;
+ cmd->minor_ver = dv->minor_version;
+ cmd->build_ver = dv->build_version;
+ cmd->subbuild_ver = dv->subbuild_version;
len = 0;
while (len < sizeof(dv->driver_string) &&
@@ -2120,11 +2055,9 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
bool enable_stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_veb *cmd =
- (struct i40e_aqc_add_veb *)&desc.params.raw;
- struct i40e_aqc_add_veb_completion *resp =
- (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp;
+ struct i40e_aqc_add_veb *cmd;
+ struct libie_aq_desc desc;
u16 veb_flags = 0;
int status;
@@ -2132,6 +2065,8 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
if (!!uplink_seid != !!downlink_seid)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
cmd->uplink_seid = cpu_to_le16(uplink_seid);
@@ -2178,15 +2113,14 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 *vebs_used, u16 *vebs_free,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
- (struct i40e_aqc_get_veb_parameters_completion *)
- &desc.params.raw;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (veb_seid == 0)
return -EINVAL;
+ cmd_resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_veb_parameters);
cmd_resp->seid = cpu_to_le16(veb_seid);
@@ -2228,10 +2162,9 @@ get_veb_exit:
**/
static u16
i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
- struct i40e_aq_desc *desc, u16 count, u16 seid)
+ struct libie_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc->params.raw;
+ struct i40e_aqc_macvlan *cmd = libie_aq_raw(desc);
u16 buf_size;
int i;
@@ -2249,9 +2182,9 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return buf_size;
}
@@ -2271,7 +2204,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2302,9 +2235,9 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2331,9 +2264,8 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
+ struct i40e_aqc_macvlan *cmd;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
@@ -2344,14 +2276,15 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
cmd_details, true);
@@ -2378,10 +2311,10 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
struct i40e_aqc_macvlan *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2391,15 +2324,15 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
- cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
cmd_details, true, aq_status);
@@ -2421,21 +2354,21 @@ int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_pf_vf_message *cmd =
- (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ struct i40e_aqc_pf_vf_message *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
@@ -2456,9 +2389,8 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd_resp =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (reg_val == NULL)
@@ -2466,6 +2398,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2491,13 +2424,13 @@ int i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
@@ -2524,16 +2457,16 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
u8 sdp_number, u64 *timeout,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd_resp =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
- cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp = libie_aq_raw(&desc);
+ cmd_resp->res_id = cpu_to_le16(resource);
cmd_resp->access_type = cpu_to_le16(access);
- cmd_resp->resource_number = cpu_to_le32(sdp_number);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
/* The completion specifies the maximum time in ms that the driver
@@ -2542,7 +2475,7 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
* busy return value and the timeout field indicates the maximum time
* the current owner of the resource has to free it.
*/
- if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ if (!status || hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -2562,15 +2495,15 @@ int i40e_aq_release_resource(struct i40e_hw *hw,
u8 sdp_number,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
- cmd->resource_id = cpu_to_le16(resource);
- cmd->resource_number = cpu_to_le32(sdp_number);
+ cmd = libie_aq_raw(&desc);
+ cmd->res_id = cpu_to_le16(resource);
+ cmd->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2594,9 +2527,8 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2607,6 +2539,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -2614,9 +2547,9 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -2639,9 +2572,8 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2652,6 +2584,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -2678,7 +2611,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 cap_count,
enum i40e_admin_queue_opc list_type_opc)
{
- struct i40e_aqc_list_capabilities_element_resp *cap;
+ struct libie_aqc_list_caps_elem *cap;
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
@@ -2687,7 +2620,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
int status;
u32 i = 0;
- cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+ cap = (struct libie_aqc_list_caps_elem *)buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
p = &hw->dev_caps;
@@ -2697,17 +2630,17 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
return;
for (i = 0; i < cap_count; i++, cap++) {
- id = le16_to_cpu(cap->id);
+ id = le16_to_cpu(cap->cap);
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
- major_rev = cap->major_rev;
+ major_rev = cap->major_ver;
switch (id) {
- case I40E_AQ_CAP_ID_SWITCH_MODE:
+ case LIBIE_AQC_CAPS_SWITCH_MODE:
p->switch_mode = number;
break;
- case I40E_AQ_CAP_ID_MNG_MODE:
+ case LIBIE_AQC_CAPS_MNG_MODE:
p->management_mode = number;
if (major_rev > 1) {
p->mng_protocols_over_mctp = logical_id;
@@ -2718,76 +2651,76 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->mng_protocols_over_mctp = 0;
}
break;
- case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ case LIBIE_AQC_CAPS_NPAR_ACTIVE:
p->npar_enable = number;
break;
- case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ case LIBIE_AQC_CAPS_OS2BMC_CAP:
p->os2bmc = number;
break;
- case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
p->valid_functions = number;
break;
- case I40E_AQ_CAP_ID_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
if (number == 1)
p->sr_iov_1_1 = true;
break;
- case I40E_AQ_CAP_ID_VF:
+ case LIBIE_AQC_CAPS_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
break;
- case I40E_AQ_CAP_ID_VMDQ:
+ case LIBIE_AQC_CAPS_VMDQ:
if (number == 1)
p->vmdq = true;
break;
- case I40E_AQ_CAP_ID_8021QBG:
+ case LIBIE_AQC_CAPS_8021QBG:
if (number == 1)
p->evb_802_1_qbg = true;
break;
- case I40E_AQ_CAP_ID_8021QBR:
+ case LIBIE_AQC_CAPS_8021QBR:
if (number == 1)
p->evb_802_1_qbh = true;
break;
- case I40E_AQ_CAP_ID_VSI:
+ case LIBIE_AQC_CAPS_VSI:
p->num_vsis = number;
break;
- case I40E_AQ_CAP_ID_DCB:
+ case LIBIE_AQC_CAPS_DCB:
if (number == 1) {
p->dcb = true;
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
break;
- case I40E_AQ_CAP_ID_FCOE:
+ case LIBIE_AQC_CAPS_FCOE:
if (number == 1)
p->fcoe = true;
break;
- case I40E_AQ_CAP_ID_ISCSI:
+ case LIBIE_AQC_CAPS_ISCSI:
if (number == 1)
p->iscsi = true;
break;
- case I40E_AQ_CAP_ID_RSS:
+ case LIBIE_AQC_CAPS_RSS:
p->rss = true;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
break;
- case I40E_AQ_CAP_ID_RXQ:
+ case LIBIE_AQC_CAPS_RXQS:
p->num_rx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_TXQ:
+ case LIBIE_AQC_CAPS_TXQS:
p->num_tx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
p->num_msix_vectors = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX vector count = %d\n",
p->num_msix_vectors);
break;
- case I40E_AQ_CAP_ID_VF_MSIX:
+ case LIBIE_AQC_CAPS_VF_MSIX:
p->num_msix_vectors_vf = number;
break;
- case I40E_AQ_CAP_ID_FLEX10:
+ case LIBIE_AQC_CAPS_FLEX10:
if (major_rev == 1) {
if (number == 1) {
p->flex10_enable = true;
@@ -2803,42 +2736,42 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
break;
- case I40E_AQ_CAP_ID_CEM:
+ case LIBIE_AQC_CAPS_CEM:
if (number == 1)
p->mgmt_cem = true;
break;
- case I40E_AQ_CAP_ID_IWARP:
+ case LIBIE_AQC_CAPS_RDMA:
if (number == 1)
p->iwarp = true;
break;
- case I40E_AQ_CAP_ID_LED:
+ case LIBIE_AQC_CAPS_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_SDP:
+ case LIBIE_AQC_CAPS_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_MDIO:
+ case LIBIE_AQC_CAPS_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
break;
- case I40E_AQ_CAP_ID_1588:
+ case LIBIE_AQC_CAPS_1588:
if (number == 1)
p->ieee_1588 = true;
break;
- case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ case LIBIE_AQC_CAPS_FD:
p->fd = true;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
- case I40E_AQ_CAP_ID_WSR_PROT:
+ case LIBIE_AQC_CAPS_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
- case I40E_AQ_CAP_ID_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
p->sec_rev_disabled = true;
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
@@ -2930,11 +2863,11 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aqc_list_capabilites *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status = 0;
- cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
@@ -2944,9 +2877,9 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
*data_size = le16_to_cpu(desc.datalen);
@@ -2979,9 +2912,8 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2992,6 +2924,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -3009,9 +2942,9 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -3037,11 +2970,9 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_get_mib *cmd =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- struct i40e_aqc_lldp_get_mib *resp =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp;
+ struct i40e_aqc_lldp_get_mib *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3049,16 +2980,18 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
@@ -3087,19 +3020,19 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (buff_size == 0 || !buff)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_set_local_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->type = mib_type;
@@ -3124,13 +3057,13 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_mib *cmd =
- (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_update_mib *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+ cmd = libie_aq_raw(&desc);
if (!enable_update)
cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
@@ -3152,13 +3085,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_stop *cmd =
- (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ struct i40e_aqc_lldp_stop *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+ cmd = libie_aq_raw(&desc);
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
@@ -3186,13 +3119,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_start *cmd =
- (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ struct i40e_aqc_lldp_start *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
@@ -3219,9 +3152,8 @@ int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_dcb_parameters *cmd =
- (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ struct i40e_aqc_set_dcb_parameters *cmd;
+ struct libie_aq_desc desc;
int status;
if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
@@ -3230,6 +3162,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
+ cmd = libie_aq_raw(&desc);
if (dcb_enable) {
cmd->valid_flags = I40E_DCB_VALID;
cmd->command = I40E_AQ_DCB_SET_AGENT;
@@ -3252,7 +3185,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3260,7 +3193,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
cmd_details);
@@ -3284,15 +3217,15 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_udp_tunnel *cmd =
- (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
- struct i40e_aqc_del_udp_tunnel_completion *resp =
- (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp;
+ struct i40e_aqc_add_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->udp_port = cpu_to_le16(udp_port);
cmd->protocol_type = protocol_index;
@@ -3313,13 +3246,13 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_remove_udp_tunnel *cmd =
- (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_remove_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+ cmd = libie_aq_raw(&desc);
cmd->index = index;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3338,9 +3271,8 @@ int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *cmd;
+ struct libie_aq_desc desc;
int status;
if (seid == 0)
@@ -3348,6 +3280,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+ cmd = libie_aq_raw(&desc);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
@@ -3368,7 +3301,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -3394,9 +3327,8 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_tx_sched_ind *cmd =
- (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct libie_aq_desc desc;
int status;
bool cmd_param_flag = false;
@@ -3423,12 +3355,13 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (cmd_param_flag)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -3451,14 +3384,14 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_configure_vsi_bw_limit *cmd =
- (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
+ cmd = libie_aq_raw(&desc);
cmd->vsi_seid = cpu_to_le16(seid);
cmd->credit = cpu_to_le16(credit);
cmd->max_credit = max_credit;
@@ -3786,18 +3719,16 @@ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_control_packet_filter *cmd =
- (struct i40e_aqc_add_remove_control_packet_filter *)
- &desc.params.raw;
- struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
- (struct i40e_aqc_add_remove_control_packet_filter_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd;
+ struct libie_aq_desc desc;
int status;
if (vsi_seid == 0)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
if (is_add) {
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_control_packet_filter);
@@ -3865,15 +3796,15 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write *cmd_resp =
- (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ struct i40e_aqc_alternate_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val0)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address0 = cpu_to_le32(reg_addr0);
cmd_resp->address1 = cpu_to_le32(reg_addr1);
@@ -3901,10 +3832,10 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
cmd->vsi_seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3922,7 +3853,7 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
int i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -3999,11 +3930,9 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_dump_internals *cmd =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- struct i40e_aqc_debug_dump_internals *resp =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp;
+ struct i40e_aqc_debug_dump_internals *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -4011,10 +3940,12 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_debug_dump_internals);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
cmd->cluster_id = cluster_id;
cmd->table_id = table_id;
@@ -4091,18 +4022,18 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
u16 bwd_size = sizeof(*bw_data);
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(bwd_size);
@@ -4534,9 +4465,8 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val)
@@ -4544,6 +4474,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -4572,7 +4503,7 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -4600,13 +4531,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
@@ -4634,7 +4565,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -4693,14 +4624,14 @@ int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -4738,14 +4669,14 @@ int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -4777,19 +4708,18 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_write_personalization_profile *cmd =
- (struct i40e_aqc_write_personalization_profile *)
- &desc.params.raw;
+ struct i40e_aqc_write_personalization_profile *cmd;
struct i40e_aqc_write_ddp_resp *resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -4797,7 +4727,7 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ resp = libie_aq_raw(&desc);
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -4819,17 +4749,17 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_applied_profiles *cmd =
- (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ struct i40e_aqc_get_applied_profiles *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->flags = flags;
@@ -4891,7 +4821,7 @@ i40e_find_segment_in_package(u32 segment_type,
static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
struct i40e_profile_aq_section *aq)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
int status;
@@ -4902,10 +4832,10 @@ static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
msglen = aq->datalen;
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
msg = &aq->data[0];
}
@@ -5122,18 +5052,18 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5159,9 +5089,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5169,9 +5098,10 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
@@ -5215,18 +5145,18 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5252,9 +5182,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5262,9 +5191,10 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 352e957443fd..9e0c9597aeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -750,7 +750,7 @@ static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -799,7 +799,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
}
/* CEE mode not enabled try querying IEEE data */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
return i40e_get_ieee_dcb_config(hw);
if (ret)
@@ -816,7 +816,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -925,11 +925,11 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw,
if (!ret) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT) {
/* MIB is not available yet but the agent is running */
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
ret = 0;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
ret = 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 8aa43aefe84c..a2ccf4c5e30b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -136,7 +136,7 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB ETS configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -175,7 +175,7 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB PFC configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -226,7 +226,7 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -291,7 +291,7 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 6cd9da662ae1..6cd6f23d42a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -489,7 +489,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -502,7 +502,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -1268,10 +1268,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[11],
@@ -1279,10 +1279,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3);
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low);
if (cnt != 10) {
dev_info(&pf->pdev->dev,
"send aq_cmd: bad command string, cnt=%d\n",
@@ -1307,19 +1307,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
kfree(desc);
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
u16 buffer_len;
u8 *buff;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[20],
@@ -1327,10 +1327,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3,
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low,
&buffer_len);
if (cnt != 11) {
dev_info(&pf->pdev->dev,
@@ -1350,7 +1350,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
goto command_write_done;
}
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
ret = i40e_asq_send_command(&pf->hw, desc, buff,
buffer_len, NULL);
if (!ret) {
@@ -1368,10 +1368,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
print_hex_dump(KERN_INFO, "AQ buffer WB: ",
DUMP_PREFIX_OFFSET, 16, 1,
buff, buffer_len, true);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 8a7a83f83ee5..86c72596617a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3,6 +3,7 @@
/* ethtool support for i40e */
+#include <linux/net/intel/libie/pctype.h>
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_txrx_common.h"
@@ -1461,7 +1462,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1471,7 +1472,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1519,7 +1520,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1533,7 +1534,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
done:
@@ -1640,7 +1641,7 @@ static int i40e_nway_reset(struct net_device *netdev)
if (ret) {
netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -1757,19 +1758,19 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1917,13 +1918,13 @@ static int i40e_get_eeprom(struct net_device *netdev,
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
- if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if (ret_val && hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev,
"read NVM failed, invalid offset 0x%x\n",
offset);
break;
} else if (ret_val &&
- hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
+ hw->aq.asq_last_status == LIBIE_AQ_RC_EACCES) {
dev_info(&pf->pdev->dev,
"read NVM failed, access, offset 0x%x\n",
offset);
@@ -2749,6 +2750,15 @@ skip_ol_tests:
netif_info(pf, drv, netdev, "testing failed\n");
}
+static void i40e_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
@@ -3129,15 +3139,12 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __i40e_set_coalesce(netdev, ec, queue);
}
-/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+static int i40e_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 flow_pctype = 0;
u64 i_set = 0;
@@ -3146,16 +3153,16 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
@@ -3412,28 +3419,28 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
switch (rule->flow_type) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
break;
default:
/* If we have stored a filter with a flow type not listed here
@@ -3535,9 +3542,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = i40e_get_rss_hash_opts(pf, cmd);
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -3566,7 +3570,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
* Returns value of bits to be set per user request
**/
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
u64 i_setc)
{
u64 i_set = i_setc;
@@ -3611,15 +3615,13 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
}
#define FLOW_PCTYPES_SIZE 64
-/**
- * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @pf: pointer to the physical function struct
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+static int i40e_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -3643,40 +3645,40 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -3685,7 +3687,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -3694,15 +3696,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
@@ -4312,36 +4314,36 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
switch (fsp->flow_type & ~FLOW_EXT) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
fdir_filter_count = &pf->fd_sctp4_filter_cnt;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
fdir_filter_count = &pf->fd_tcp4_filter_cnt;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
fdir_filter_count = &pf->fd_sctp6_filter_cnt;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
fdir_filter_count = &pf->fd_tcp6_filter_cnt;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
fdir_filter_count = &pf->fd_udp6_filter_cnt;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
fdir_filter_count = &pf->fd_ip6_filter_cnt;
flex_l3 = true;
break;
@@ -4677,8 +4679,8 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* separate support, we'll always assume and enforce that the two flow
* types must have matching input sets.
*/
- if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ if (index == LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
new_mask);
/* Add the new offset and update table, if necessary */
@@ -4954,13 +4956,9 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40e_set_rss_hash_opt(pf, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
@@ -5251,9 +5249,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ enum libie_aq_err adq_err;
u32 reset_needed = 0;
int status;
u32 i, j;
@@ -5373,12 +5371,11 @@ flags_complete:
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
0, NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
}
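/*
 * Illustrative sketch, not part of this patch: the shared AdminQ error
 * string helper takes only the error code, so call sites drop the hw
 * pointer that i40e_aq_str() required:
 *
 *	// before: i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)
 *	// after:
 *	dev_info(&pf->pdev->dev, "aq_err %s\n",
 *		 libie_aq_str(pf->hw.aq.asq_last_status));
 */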
@@ -5440,16 +5437,16 @@ flags_complete:
if (status) {
adq_err = pf->hw.aq.asq_last_status;
switch (adq_err) {
- case I40E_AQ_RC_EEXIST:
+ case LIBIE_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
reset_needed = 0;
break;
- case I40E_AQ_RC_EPERM:
+ case LIBIE_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
dev_warn(&pf->pdev->dev,
"Stop FW LLDP agent command is still being processed, please try again in a second.\n");
return -EBUSY;
@@ -5457,8 +5454,7 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %pe, %s\n",
ERR_PTR(status),
- i40e_aq_str(&pf->hw,
- adq_err));
+ libie_aq_str(adq_err));
return -EINVAL;
}
}
@@ -5809,6 +5805,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_regs = i40e_get_regs,
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = i40e_get_link_ext_stats,
.get_wol = i40e_get_wol,
.set_wol = i40e_set_wol,
.set_eeprom = i40e_set_eeprom,
@@ -5835,6 +5832,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
.get_rxfh = i40e_get_rxfh,
.set_rxfh = i40e_set_rxfh,
+ .get_rxfh_fields = i40e_get_rxfh_fields,
+ .set_rxfh_fields = i40e_set_rxfh_fields,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_module_info = i40e_get_module_info,
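/*
 * Illustrative sketch, not part of this patch: get_link_ext_stats is
 * expected to report the pf->link_down_events counter that
 * i40e_link_event() increments on carrier loss (see the i40e_main.c hunk
 * below).  A hypothetical body, assuming the standard callback signature:
 *
 *	static void example_get_link_ext_stats(struct net_device *netdev,
 *					       struct ethtool_link_ext_stats *stats)
 *	{
 *		struct i40e_netdev_priv *np = netdev_priv(netdev);
 *
 *		stats->link_down_events = np->vsi->back->link_down_events;
 *	}
 */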
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f1c9e575703e..b83f823e4917 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3,6 +3,7 @@
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
@@ -100,6 +101,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -1813,7 +1815,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ret)
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
/* schedule our worker thread which will take care of
@@ -1845,7 +1847,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -1857,7 +1859,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -2339,19 +2341,18 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int aq_ret;
aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == LIBIE_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
- vsi_name, ERR_PTR(aq_ret),
- i40e_aq_str(hw, aq_status));
+ vsi_name, ERR_PTR(aq_ret), libie_aq_str(aq_status));
}
}
@@ -2374,7 +2375,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int fcnt;
i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
@@ -2385,19 +2386,17 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_status), vsi_name);
+ libie_aq_str(aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi_name);
+ libie_aq_str(aq_status), vsi_name, vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi->type);
+ libie_aq_str(aq_status), vsi_name, vsi->type);
}
}
}
@@ -2440,8 +2439,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s, forcing overflow promiscuous on %s\n",
- i40e_aq_str(hw, hw->aq.asq_last_status),
- vsi_name);
+ libie_aq_str(hw->aq.asq_last_status), vsi_name);
}
return aq_ret;
@@ -2482,7 +2480,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"Set default VSI failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2494,7 +2492,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
hw,
@@ -2504,7 +2502,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
@@ -2812,7 +2810,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
"set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
@@ -2833,7 +2831,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc ? "on" : "off",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
out:
@@ -2954,27 +2952,6 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * i40e_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return i40e_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return i40e_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -3003,8 +2980,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3038,8 +3014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3283,8 +3258,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
dev_info(&vsi->back->pdev->dev,
"add pvid failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -5554,7 +5528,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5565,7 +5539,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5755,7 +5729,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
if (ret) {
dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -5811,7 +5785,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed querying vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
@@ -5878,7 +5852,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5891,7 +5865,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -6005,7 +5979,7 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
dev_err(&pf->pdev->dev,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
max_tx_rate, seid, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
@@ -6017,8 +5991,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
**/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
- enum i40e_admin_queue_err last_aq_status;
struct i40e_cloud_filter *cfilter;
+ enum libie_aq_err last_aq_status;
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
@@ -6081,7 +6055,7 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"Failed to delete cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
+ libie_aq_str(last_aq_status));
kfree(cfilter);
}
@@ -6216,7 +6190,7 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
kfree(lut);
return ret;
}
@@ -6315,8 +6289,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
dev_info(&pf->pdev->dev,
"add new vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -6559,12 +6532,10 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
pf->last_sw_conf_valid_flags,
mode, NULL);
- if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ if (ret && hw->aq.asq_last_status != LIBIE_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
@@ -6763,8 +6734,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"VEB bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -6773,8 +6743,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"Failed getting veb bw config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -6855,7 +6824,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
dev_info(&pf->pdev->dev,
"Resume Port Tx failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6879,8 +6848,7 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"Suspend Port Tx failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6919,8 +6887,7 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
if (ret) {
dev_info(&pf->pdev->dev,
"Set DCB Config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7036,8 +7003,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"Modify Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7076,8 +7042,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"DCB Updated failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7160,8 +7125,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev,
"Enable Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
}
@@ -7234,14 +7198,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
- } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -7497,8 +7460,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
speed = abilities.link_speed;
@@ -7509,8 +7471,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
@@ -7554,8 +7515,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"set phy config ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return err;
}
@@ -7895,8 +7855,7 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
}
dev_info(&pf->pdev->dev,
"Error adding mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7968,8 +7927,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
if (ret) {
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -8184,8 +8142,7 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
} else {
dev_info(&pf->pdev->dev,
"Error deleting mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
}
break;
}
@@ -9188,47 +9145,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for TCP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for Other/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
/* Reprogram the default input set for Other/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
@@ -9439,8 +9396,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lldp_get_mib *mib =
- (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *mib = libie_aq_raw(&e->desc);
struct i40e_hw *hw = &pf->hw;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
@@ -9497,8 +9453,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
goto exit;
}
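/*
 * Illustrative sketch, not part of this patch: libie_aq_raw() replaces the
 * open-coded casts of the descriptor parameter area, assuming it simply
 * returns a pointer to the raw params of the given descriptor:
 *
 *	// before: (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw
 *	struct i40e_aqc_lldp_get_mib *mib = libie_aq_raw(&e->desc);
 */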
@@ -9579,8 +9534,7 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lan_overflow *data =
- (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ struct i40e_aqc_lan_overflow *data = libie_aq_raw(&e->desc);
u32 queue = le32_to_cpu(data->prtdcb_rupto);
u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
struct i40e_hw *hw = &pf->hw;
@@ -9656,7 +9610,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
* settings. It is safe to restore the default input set
* because there are no active TCPv4 filter rules.
*/
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
@@ -9959,6 +9913,9 @@ static void i40e_link_event(struct i40e_pf *pf)
new_link == netif_carrier_ok(vsi->netdev)))
return;
+ if (!new_link && old_link)
+ pf->link_down_events++;
+
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -10097,8 +10054,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_aqc_get_link_status *status = libie_aq_raw(&e->desc);
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -10306,8 +10262,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10318,8 +10273,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10342,8 +10296,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10354,8 +10307,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10470,12 +10422,12 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
{
- struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ struct libie_aqc_list_caps_elem *cap_buf;
u16 data_size;
int buf_len;
int err;
- buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ buf_len = 40 * sizeof(struct libie_aqc_list_caps_elem);
do {
cap_buf = kzalloc(buf_len, GFP_KERNEL);
if (!cap_buf)
@@ -10488,15 +10440,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
/* data loaded, buffer no longer needed */
kfree(cap_buf);
- if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_ENOMEM) {
/* retry with a larger buffer */
buf_len = data_size;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ } else if (pf->hw.aq.asq_last_status != LIBIE_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
@@ -10633,8 +10584,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
dev_dbg(&pf->pdev->dev,
"Failed to rebuild cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -10875,8 +10825,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto clear_recovery;
}
i40e_get_oem_version(&pf->hw);
@@ -10987,8 +10936,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -11086,8 +11034,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (ret)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
@@ -11118,8 +11065,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -12325,8 +12271,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12339,8 +12284,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12507,7 +12451,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= i40e_pf_get_default_rss_hena(pf);
+ hena |= i40e_pf_get_default_rss_hashcfg(pf);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -13001,8 +12945,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
NULL);
if (ret) {
netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13021,8 +12964,7 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13622,7 +13564,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
- .ndo_eth_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -13650,6 +13591,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_xsk_wakeup = i40e_xsk_wakeup,
.ndo_dfwd_add_station = i40e_fwd_add,
.ndo_dfwd_del_station = i40e_fwd_del,
+ .ndo_hwtstamp_get = i40e_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = i40e_ptp_hwtstamp_set,
};
/**
@@ -13911,8 +13854,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -13941,8 +13883,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %d aq_err %s\n",
ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13961,8 +13902,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13985,8 +13925,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
"failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
break;
@@ -14080,8 +14019,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"add vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -14111,8 +14049,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get vsi bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
@@ -14560,8 +14497,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14570,8 +14506,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw ets config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14759,8 +14694,7 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14770,16 +14704,14 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB statistics idx, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -14974,9 +14906,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
if (ret) {
dev_info(&pf->pdev->dev,
"get switch config failed err %d aq_err %s\n",
- ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ret, libie_aq_str(pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -15021,8 +14951,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't fetch switch config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -15045,12 +14974,11 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
pf->last_sw_conf_valid_flags = valid_flags;
@@ -15891,7 +15819,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
- pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
@@ -15953,8 +15880,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
/* VF MDD event logs are rate limited to one second intervals */
ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
@@ -15976,8 +15902,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -16108,8 +16033,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
@@ -16119,8 +16043,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7f0936f4e05e..ed3c54e36be3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -997,7 +997,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
+ struct libie_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
@@ -1011,7 +1011,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
memset(&hw->nvm_wb_desc, 0, aq_desc_len);
/* get the aq descriptor */
@@ -1022,7 +1022,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = -EINVAL;
return -EINVAL;
}
- aq_desc = (struct i40e_aq_desc *)bytes;
+ aq_desc = (struct libie_aq_desc *)bytes;
/* if data buffer needed, make sure it's ready */
aq_data_len = cmd->data_size - aq_desc_len;
@@ -1053,7 +1053,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"%s err %pe aq_err %s\n",
__func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
}
@@ -1087,7 +1087,7 @@ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
/* check offset range */
@@ -1154,7 +1154,7 @@ static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
/* check copylength range */
@@ -1442,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
+ if (status && hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1628,9 +1628,9 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
* @desc: AdminQ descriptor
**/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
+ struct libie_aq_desc *desc)
{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+ u32 aq_desc_len = sizeof(struct libie_aq_desc);
if (opcode == hw->nvm_wait_opcode) {
memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
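/*
 * Illustrative note, not part of this patch: the NVM update path now sizes
 * its buffers from the shared descriptor type, assuming struct
 * libie_aq_desc keeps the same 32-byte AdminQ descriptor layout as the old
 * struct i40e_aq_desc, so offsets into the user-supplied "bytes" buffer
 * are unchanged:
 *
 *	u32 aq_desc_len = sizeof(struct libie_aq_desc);	// expected to stay 32
 *	u32 aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
 */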
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 099bb8ab7d70..aef5de53ce3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -23,22 +23,22 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -46,7 +46,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
bool i40e_check_asq_alive(struct i40e_hw *hw);
int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -155,7 +154,7 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
@@ -163,7 +162,7 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -339,7 +338,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc);
+ struct libie_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index b72a4b5d76b9..33535418178b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -550,7 +550,7 @@ static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
pins.gpio_4 = pf->ptp_pins->gpio_4;
/* To turn on the pin - find the corresponding one based on
- * the given index. To to turn the function off - find
+ * the given index. To turn the function off - find
* which pin had it assigned. Don't use ptp_find_pin here
* because it tries to lock the pincfg_mux which is locked by
* ptp_pin_store() that calls here.
@@ -912,23 +912,26 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
}
/**
- * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_get - interface to read the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
*
 * Obtain the current hardware timestamping settings as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
* deconstruct it from the registers.
**/
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &pf->tstamp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = pf->tstamp_config;
+
+ return 0;
}
/**
@@ -1167,7 +1170,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf)
* more broad if the specific filter is not directly supported.
**/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
u32 tsyntype, regval;
@@ -1290,9 +1293,10 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/**
- * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_set - interface to control the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -1303,26 +1307,25 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
* as the user receives the timestamps they care about and the user is notified
* the filter has been broadened.
**/
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
int err;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = i40e_ptp_set_timestamp_mode(pf, &config);
+ err = i40e_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* save these settings for future reference */
- pf->tstamp_config = config;
+ pf->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
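/*
 * Illustrative sketch, not part of this patch: with the SIOC[GS]HWTSTAMP
 * ioctl handler removed, the core ioctl/netlink paths hand the driver a
 * kernel_hwtstamp_config directly, so no copy_{from,to}_user() on an ifreq
 * is needed:
 *
 *	// roughly what the networking core does on SIOCSHWTSTAMP:
 *	err = dev->netdev_ops->ndo_hwtstamp_set(dev, config, extack);
 *	// and on SIOCGHWTSTAMP:
 *	err = dev->netdev_ops->ndo_hwtstamp_get(dev, config);
 */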
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c006f716a3bd..048c33039130 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
@@ -397,12 +398,12 @@ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);
if (ret) {
kfree(raw_packet);
@@ -444,12 +445,12 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);
if (ret) {
kfree(raw_packet);
@@ -499,12 +500,12 @@ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
if (ret) {
kfree(raw_packet);
@@ -543,11 +544,11 @@ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
int i;
if (ipv4) {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV4;
} else {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV6;
}
for (i = iter_start; i <= iter_end; i++) {
@@ -2948,9 +2949,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7c26c9a2bf65..1e5fd63d47f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -4,6 +4,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
#include <net/xdp.h>
#include "i40e_type.h"
@@ -71,30 +72,30 @@ enum i40e_dyn_idx {
#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-#define i40e_pf_get_default_rss_hena(pf) \
+#define I40E_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hashcfg(pf) \
(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
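/*
 * Illustrative sketch, not part of this patch: the renamed helper is used
 * exactly like the old *_RSS_HENA macros when seeding the PF hash-enable
 * registers (see i40e_pf_config_rss() in the i40e_main.c hunk above):
 *
 *	u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
 *
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hashcfg);
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hashcfg >> 32));
 */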
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 28568e126850..ed8bbdb586da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -24,7 +24,7 @@
/* forward declaration */
struct i40e_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -555,8 +555,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
- struct i40e_aq_desc nvm_wb_desc;
- struct i40e_aq_desc nvm_aq_event_desc;
+ struct libie_aq_desc nvm_wb_desc;
+ struct libie_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -929,38 +929,6 @@ struct i40e_filter_program_desc {
#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- I40E_FILTER_PCTYPE_FCOE_OX = 48,
- I40E_FILTER_PCTYPE_FCOE_RX = 49,
- I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
enum i40e_filter_program_desc_dest {
I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 88e6bef69342..9b8efdeafbcf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -812,7 +812,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
}
if (!idx) {
- u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
@@ -841,8 +841,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
- wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hashcfg >> 32));
/* program mac filter only for VF VSI */
ret = i40e_sync_vsi_filters(vsi);
if (ret)
@@ -1289,9 +1290,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
return aq_ret;
}
@@ -1305,9 +1305,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
}
return aq_ret;
@@ -1322,9 +1321,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -1338,9 +1336,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -3137,10 +3134,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
const u8 *addr = al->list[i].addr;
/* Allow to delete VF primary MAC only if it was not set
- * administratively by PF or if VF is trusted.
+ * administratively by PF.
*/
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
- if (i40e_can_vf_change_mac(vf))
+ if (!vf->pf_set_mac)
was_unimac_deleted = true;
else
continue;
@@ -3447,15 +3444,15 @@ err:
}
/**
- * i40e_vc_get_rss_hena
+ * i40e_vc_get_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Return the RSS HENA bits allowed by the hardware
+ * Return the RSS Hash configuration bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
struct i40e_pf *pf = vf->pf;
int aq_ret = 0;
int len = 0;
@@ -3464,7 +3461,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -3472,26 +3469,26 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
len = 0;
goto err;
}
- vrh->hena = i40e_pf_get_default_rss_hena(pf);
+ vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
}
/**
- * i40e_vc_set_rss_hena
+ * i40e_vc_set_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Set the RSS HENA bits for the VF
+ * Set the RSS Hash configuration bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh =
- (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int aq_ret = 0;
@@ -3500,13 +3497,14 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)vrh->hashcfg);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(vrh->hena >> 32));
+ (u32)(vrh->hashcfg >> 32));
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret);
}
/**
@@ -3746,8 +3744,7 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
hlist_del(&cfilter->cloud_node);
kfree(cfilter);
@@ -3849,7 +3846,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err;
}
@@ -3985,7 +3982,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -4253,11 +4250,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ ret = i40e_vc_get_rss_hashcfg(vf, msg);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ ret = i40e_vc_set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
ret = i40e_vc_enable_vlan_stripping(vf, msg);
@@ -5006,7 +5003,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
- vf_stats->tx_dropped = stats->tx_discards;
+ vf_stats->tx_dropped = stats->tx_errors;
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index f7a98ff43a57..a87e0c6d4017 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -114,8 +114,6 @@ struct iavf_q_vector {
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -315,8 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_GET_RSS_HASHCFG BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_RSS_HASHCFG BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
@@ -456,7 +454,7 @@ struct iavf_adapter {
u32 aq_wait_count;
/* RSS stuff */
enum virtchnl_rss_algorithm hfunc;
- u64 hena;
+ u64 rss_hashcfg;
u16 rss_key_size;
u16 rss_lut_size;
u8 *rss_key;
@@ -600,8 +598,8 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter);
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
-void iavf_get_hena(struct iavf_adapter *adapter);
-void iavf_set_hena(struct iavf_adapter *adapter);
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter);
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 82fcd18ad660..6937b7dd44cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -18,7 +18,7 @@ static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -45,7 +45,7 @@ static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -81,7 +81,7 @@ static void iavf_free_adminq_arq(struct iavf_hw *hw)
**/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct iavf_dma_mem *bi;
enum iavf_status ret_code;
int i;
@@ -111,9 +111,9 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -122,12 +122,12 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -558,8 +558,8 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
struct iavf_adminq_ring *asq = &hw->aq.asq;
struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct iavf_aq_desc desc_cb;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
@@ -573,7 +573,7 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
memset((void *)details, 0,
sizeof(struct iavf_asq_cmd_details));
ntc++;
@@ -615,14 +615,14 @@ bool iavf_asq_done(struct iavf_hw *hw)
* queue. It runs the queue, cleans the queue, etc
**/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
struct iavf_asq_cmd_details *details;
- struct iavf_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
enum iavf_status status = 0;
u16 retval = 0;
@@ -637,7 +637,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
@@ -717,9 +717,9 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -766,13 +766,13 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = IAVF_ERR_NOT_READY;
else
status = IAVF_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
@@ -809,12 +809,12 @@ asq_send_command_error:
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -832,7 +832,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
@@ -866,9 +866,9 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & IAVF_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
@@ -892,14 +892,14 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, IAVF_VF_ARQT1, ntc);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 406506f64bdd..bbf5c4b3a2ae 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -9,7 +9,7 @@
#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct iavf_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct iavf_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define IAVF_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct iavf_asq_cmd_details {
/* ARQ event information */
struct iavf_arq_event_info {
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct iavf_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum iavf_admin_queue_err asq_last_status;
- enum iavf_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -123,6 +123,6 @@ static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
#define IAVF_AQ_LARGE_BUF 512
#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
index bc512308557b..0482c9ce9b9c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_ADMINQ_CMD_H_
#define _IAVF_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the iavf Admin Queue commands and is shared between
* iavf Firmware and Software.
*
@@ -21,87 +23,6 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-struct iavf_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define IAVF_AQ_FLAG_DD_SHIFT 0
-#define IAVF_AQ_FLAG_CMP_SHIFT 1
-#define IAVF_AQ_FLAG_ERR_SHIFT 2
-#define IAVF_AQ_FLAG_VFE_SHIFT 3
-#define IAVF_AQ_FLAG_LB_SHIFT 9
-#define IAVF_AQ_FLAG_RD_SHIFT 10
-#define IAVF_AQ_FLAG_VFC_SHIFT 11
-#define IAVF_AQ_FLAG_BUF_SHIFT 12
-#define IAVF_AQ_FLAG_SI_SHIFT 13
-#define IAVF_AQ_FLAG_EI_SHIFT 14
-#define IAVF_AQ_FLAG_FE_SHIFT 15
-
-#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum iavf_admin_queue_err {
- IAVF_AQ_RC_OK = 0, /* success */
- IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
- IAVF_AQ_RC_ENOENT = 2, /* No such element */
- IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
- IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
- IAVF_AQ_RC_EIO = 5, /* I/O error */
- IAVF_AQ_RC_ENXIO = 6, /* No such resource */
- IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
- IAVF_AQ_RC_EAGAIN = 8, /* Try again */
- IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
- IAVF_AQ_RC_EACCES = 10, /* Permission denied */
- IAVF_AQ_RC_EFAULT = 11, /* Bad address */
- IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
- IAVF_AQ_RC_EEXIST = 13, /* object already exists */
- IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
- IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
- IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
- IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- IAVF_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum iavf_admin_queue_opc {
/* aq commands */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index aa751ce3425b..614a886bca99 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -8,66 +8,6 @@
#include "iavf_prototype.h"
/**
- * iavf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case IAVF_AQ_RC_OK:
- return "OK";
- case IAVF_AQ_RC_EPERM:
- return "IAVF_AQ_RC_EPERM";
- case IAVF_AQ_RC_ENOENT:
- return "IAVF_AQ_RC_ENOENT";
- case IAVF_AQ_RC_ESRCH:
- return "IAVF_AQ_RC_ESRCH";
- case IAVF_AQ_RC_EINTR:
- return "IAVF_AQ_RC_EINTR";
- case IAVF_AQ_RC_EIO:
- return "IAVF_AQ_RC_EIO";
- case IAVF_AQ_RC_ENXIO:
- return "IAVF_AQ_RC_ENXIO";
- case IAVF_AQ_RC_E2BIG:
- return "IAVF_AQ_RC_E2BIG";
- case IAVF_AQ_RC_EAGAIN:
- return "IAVF_AQ_RC_EAGAIN";
- case IAVF_AQ_RC_ENOMEM:
- return "IAVF_AQ_RC_ENOMEM";
- case IAVF_AQ_RC_EACCES:
- return "IAVF_AQ_RC_EACCES";
- case IAVF_AQ_RC_EFAULT:
- return "IAVF_AQ_RC_EFAULT";
- case IAVF_AQ_RC_EBUSY:
- return "IAVF_AQ_RC_EBUSY";
- case IAVF_AQ_RC_EEXIST:
- return "IAVF_AQ_RC_EEXIST";
- case IAVF_AQ_RC_EINVAL:
- return "IAVF_AQ_RC_EINVAL";
- case IAVF_AQ_RC_ENOTTY:
- return "IAVF_AQ_RC_ENOTTY";
- case IAVF_AQ_RC_ENOSPC:
- return "IAVF_AQ_RC_ENOSPC";
- case IAVF_AQ_RC_ENOSYS:
- return "IAVF_AQ_RC_ENOSYS";
- case IAVF_AQ_RC_ERANGE:
- return "IAVF_AQ_RC_ERANGE";
- case IAVF_AQ_RC_EFLUSHED:
- return "IAVF_AQ_RC_EFLUSHED";
- case IAVF_AQ_RC_BAD_ADDR:
- return "IAVF_AQ_RC_BAD_ADDR";
- case IAVF_AQ_RC_EMODE:
- return "IAVF_AQ_RC_EMODE";
- case IAVF_AQ_RC_EFBIG:
- return "IAVF_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* iavf_stat_str - convert status err code to a string
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
@@ -228,7 +168,7 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
if ((!(mask & hw->debug_mask)) || !desc)
@@ -244,11 +184,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_high),
le32_to_cpu(aq_desc->cookie_low));
iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && aq_desc->datalen) {
u16 len = le16_to_cpu(aq_desc->datalen);
@@ -297,11 +237,11 @@ bool iavf_check_asq_alive(struct iavf_hw *hw)
**/
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
- struct iavf_aq_desc desc;
- struct iavf_aqc_queue_shutdown *cmd =
- (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ struct iavf_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
enum iavf_status status;
+ cmd = libie_aq_raw(&desc);
iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
if (unloading)
@@ -327,12 +267,13 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
+ struct iavf_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
enum iavf_status status;
- struct iavf_aq_desc desc;
- struct iavf_aqc_get_set_rss_lut *cmd_resp =
- (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
u16 flags;
+ cmd_resp = libie_aq_raw(&desc);
+
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
iavf_aqc_opc_set_rss_lut);
@@ -341,8 +282,8 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
iavf_aqc_opc_get_rss_lut);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -392,11 +333,12 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
struct iavf_aqc_get_set_rss_key_data *key,
bool set)
{
- enum iavf_status status;
- struct iavf_aq_desc desc;
- struct iavf_aqc_get_set_rss_key *cmd_resp =
- (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
+ struct iavf_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
+ enum iavf_status status;
+
+ cmd_resp = libie_aq_raw(&desc);
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
@@ -406,8 +348,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
iavf_aqc_opc_get_rss_key);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -452,18 +394,18 @@ enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_asq_cmd_details details;
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
enum iavf_status status;
iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF
- | IAVF_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF
+ | LIBIE_AQ_FLAG_RD));
if (msglen > IAVF_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 2b2b315205b5..05d72be3fe80 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1307,14 +1307,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
-/**
- * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
- * @cmd: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
+static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
{
u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
@@ -1350,15 +1343,8 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
return hdrs;
}
-/**
- * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
- * @cmd: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended hash fields for
- * RSS configuration
- */
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
+static u64
+iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1416,17 +1402,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
return hfld;
}
-/**
- * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
bool symm = false;
@@ -1493,17 +1474,10 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return err;
}
-/**
- * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss;
u64 hash_flds;
u32 hdrs;
@@ -1568,9 +1542,6 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = iavf_del_fdir_ethtool(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1612,9 +1583,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1812,6 +1780,8 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
+ .get_rxfh_fields = iavf_get_rxfh_fields,
+ .set_rxfh_fields = iavf_set_rxfh_fields,
.get_channels = iavf_get_channels,
.set_channels = iavf_set_channels,
.get_rxfh_key_size = iavf_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 81d7249d1149..69054af4689a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -50,6 +50,7 @@ MODULE_ALIAS("i40evf");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS("LIBETH");
MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
@@ -528,33 +529,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- **/
-static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct iavf_q_vector *q_vector =
- container_of(notify, struct iavf_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * iavf_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- **/
-static void iavf_irq_affinity_release(struct kref *ref) {}
-
-/**
* iavf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
* @basename: device basename
@@ -568,7 +542,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
- int cpu;
iavf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -603,17 +576,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
- /* register for affinity change notifications */
- q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
- q_vector->affinity_notify.release =
- iavf_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* Spread the IRQ affinity hints across online CPUs. Note that
- * get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_update_affinity_hint.
- */
- cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -622,8 +584,6 @@ free_queue_irqs:
while (vector) {
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -665,6 +625,7 @@ static int iavf_request_misc_irq(struct iavf_adapter *adapter)
**/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
+ struct iavf_q_vector *q_vector;
int vector, irq_num, q_vectors;
if (!adapter->msix_entries)
@@ -673,10 +634,10 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) {
+ q_vector = &adapter->q_vectors[vector];
+ netif_napi_set_irq_locked(&q_vector->napi, -1);
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &adapter->q_vectors[vector]);
+ free_irq(irq_num, q_vector);
}
}
@@ -1734,7 +1695,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1744,7 +1705,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1823,12 +1784,13 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
+ adapter->rss_hashcfg =
+ IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
else
- adapter->hena = IAVF_DEFAULT_RSS_HENA;
+ adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;
- wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
- wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
+ wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
}
iavf_fill_rss_lut(adapter);
@@ -1846,7 +1808,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
**/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
- int q_idx = 0, num_q_vectors;
+ int q_idx = 0, num_q_vectors, irq_num;
struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
@@ -1856,14 +1818,15 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
q_vector = &adapter->q_vectors[q_idx];
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
- cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add_locked(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll, q_idx);
+ netif_napi_set_irq_locked(&q_vector->napi, irq_num);
}
return 0;
@@ -2195,12 +2158,12 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
- iavf_get_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
+ iavf_get_rss_hashcfg(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
- iavf_set_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
+ iavf_set_rss_hashcfg(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
@@ -5387,6 +5350,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_etherdev;
}
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index cac9d1a35a52..7f9f9dbf959a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -22,7 +22,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *events_pending);
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details);
@@ -34,7 +34,6 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
bool iavf_check_asq_alive(struct iavf_hw *hw);
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 422312b8b54a..363c42bf3dcf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -723,7 +723,7 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
+ libeth_rx_recycle_slow(rx_fqes->netmem);
if (unlikely(++i == rx_ring->count))
i = 0;
@@ -1197,10 +1197,11 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ u32 hr = netmem_get_pp(rx_buffer->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->offset + hr, size, rx_buffer->truesize);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer->netmem, rx_buffer->offset + hr,
+ size, rx_buffer->truesize);
}
/**
@@ -1214,12 +1215,13 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
void *va;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->offset;
+ va = page_address(buf_page) + rx_buffer->offset;
net_prefetch(va + hr);
/* build an skb around the page buffer */
@@ -1648,7 +1650,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
* continue to poll, otherwise we must stop polling so the
* interrupt can move to the correct cpu.
*/
- if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+ if (!cpumask_test_cpu(cpu_id,
+ &q_vector->napi.config->affinity_mask)) {
/* Tell napi that we are done polling */
napi_complete_done(napi, work_done);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 79ad554f2d53..df49b0b1d54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK 256
@@ -59,26 +61,26 @@ enum iavf_dyn_idx_t {
#define IAVF_PE_ITR IAVF_IDX_ITR2
/* Supported RSS offloads */
-#define IAVF_DEFAULT_RSS_HENA ( \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+#define IAVF_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define IAVF_DEFAULT_RSS_HASHCFG_EXPANDED (IAVF_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f9e1319620f4..1d8cf29cb65a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -19,7 +19,7 @@
/* forward declaration */
struct iavf_hw;
-typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -463,38 +463,6 @@ enum iavf_tx_ctx_desc_cmd_bits {
IAVF_TX_CTX_DESC_SWPE = 0x40
};
-/* Packet Classifier Types for filters */
-enum iavf_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- IAVF_FILTER_PCTYPE_FCOE_OX = 48,
- IAVF_FILTER_PCTYPE_FCOE_RX = 49,
- IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 07f0d0a0f1e2..34a422a4a29c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -29,7 +29,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
if (status)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
op, iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1145,12 +1145,12 @@ void iavf_request_stats(struct iavf_adapter *adapter)
}
/**
- * iavf_get_hena
+ * iavf_get_rss_hashcfg
* @adapter: adapter structure
*
- * Request hash enable capabilities from PF
+ * Request RSS Hash configuration bits from PF
**/
-void iavf_get_hena(struct iavf_adapter *adapter)
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1158,20 +1158,20 @@ void iavf_get_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
- adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
}
/**
- * iavf_set_hena
+ * iavf_set_rss_hashcfg
* @adapter: adapter structure
*
* Request the PF to set our RSS hash capabilities
**/
-void iavf_set_hena(struct iavf_adapter *adapter)
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
{
- struct virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hashcfg vrh;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1179,10 +1179,10 @@ void iavf_set_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- vrh.hena = adapter->hena;
- adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
- adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
+ vrh.hashcfg = adapter->rss_hashcfg;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
sizeof(vrh));
}
@@ -2752,11 +2752,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: {
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
if (msglen == sizeof(*vrh))
- adapter->hena = vrh->hena;
+ adapter->rss_hashcfg = vrh->hashcfg;
else
dev_warn(&adapter->pdev->dev,
"Invalid message %d from PF\n", v_opcode);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9e0d9f710441..d0f9c9492363 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -53,7 +53,7 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice_tspll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 4af60e2f37df..fb2de521731a 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -293,7 +293,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (err) {
dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
goto out_free_ctx;
}
@@ -302,7 +302,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
if (err) {
dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -313,7 +313,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
if (err) {
dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -324,7 +324,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
if (err) {
dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -440,7 +440,7 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
err = ice_aq_nvm_update_empr(hw);
if (err) {
dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
index 19c3d37aa768..ab519c0f28bf 100644
--- a/drivers/net/ethernet/intel/ice/devlink/health.c
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -204,7 +204,7 @@ static void ice_config_health_events(struct ice_pf *pf, bool enable)
if (ret)
dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
str_enable_disable(enable), ret,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
}
/**
@@ -217,10 +217,12 @@ static void ice_config_health_events(struct ice_pf *pf, bool enable)
void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
const struct ice_aqc_health_status_elem *health_info;
+ const struct ice_aqc_get_health_status *cmd;
u16 count;
health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
- count = le16_to_cpu(event->desc.params.get_health_status.health_status_count);
+ cmd = libie_aq_raw(&event->desc);
+ count = le16_to_cpu(cmd->health_status_count);
if (count > (event->buf_len / sizeof(*health_info))) {
dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
diff --git a/drivers/net/ethernet/intel/ice/devlink/port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index 767419a67fef..63fb36fc4b3d 100644
--- a/drivers/net/ethernet/intel/ice/devlink/port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -30,6 +30,8 @@ static const char *ice_devlink_port_opt_speed_str(u8 speed)
return "10";
case ICE_AQC_PORT_OPT_MAX_LANE_25G:
return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_40G:
+ return "40";
case ICE_AQC_PORT_OPT_MAX_LANE_50G:
return "50";
case ICE_AQC_PORT_OPT_MAX_LANE_100G:
diff --git a/drivers/net/ethernet/intel/ice/devlink/port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..e89ddd60eeac 100644
--- a/drivers/net/ethernet/intel/ice/devlink/port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
@@ -11,7 +11,7 @@
* struct ice_dynamic_port - Track dynamically added devlink port instance
* @hw_addr: the HW address for this port
* @active: true if the port has been activated
- * @attached: true it the prot is attached
+ * @attached: true if the port is attached
* @devlink_port: the associated devlink port structure
* @pf: pointer to the PF private structure
* @vsi: the VSI associated with this port
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ddd0ad68185b..2098f00b3cd3 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -67,6 +67,7 @@
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
+#include "ice_tspll.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -614,6 +615,7 @@ struct ice_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u32 link_down_events;
u8 wol_ena : 1; /* software state of WoL */
u32 wakeup_reason; /* last wakeup reason */
@@ -958,7 +960,6 @@ int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
-const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 66e070095d1b..9e4adc43e474 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -32,6 +32,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
adapter->device_serial_number = dsn;
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
mutex_init(&adapter->ports.lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index ac15c0d2bc1a..db66d03c9f96 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -27,9 +27,10 @@ struct ice_port_list {
/**
* struct ice_adapter - PCI adapter resources shared across PFs
+ * @refcount: Reference count. struct ice_pf objects hold the references.
* @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
* register of the PTP clock.
- * @refcount: Reference count. struct ice_pf objects hold the references.
+ * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
* @device_serial_number: DSN cached for collision detection on 32bit systems
@@ -38,6 +39,8 @@ struct ice_adapter {
refcount_t refcount;
/* For access to the GLTSYN_TIME register */
spinlock_t ptp_gltsyn_time_lock;
+ /* For access to GLCOMM_QTX_CNTX_CTL register */
+ spinlock_t txq_ctx_lock;
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index bdee499f991a..3bd3ea3af888 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _ICE_ADMINQ_CMD_H_
#define _ICE_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the Admin Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
@@ -14,42 +16,22 @@
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
-#define ICE_TXQ_CTX_SZ 22
typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+
+/* The Tx queue context is 40 bytes, and includes some internal state. The
+ * Admin Queue buffers don't include the internal state, so they only include
+ * the first 22 bytes of the context.
+ */
+#define ICE_TXQ_CTX_SZ 22
+
typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
-struct ice_aqc_generic {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
-};
+#define ICE_TXQ_CTX_FULL_SIZE_DWORDS 10
+#define ICE_TXQ_CTX_FULL_SZ \
+ (ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(u32))
-/* Get version (direct 0x0001) */
-struct ice_aqc_get_ver {
- __le32 rom_ver;
- __le32 fw_build;
- u8 fw_branch;
- u8 fw_major;
- u8 fw_minor;
- u8 fw_patch;
- u8 api_branch;
- u8 api_major;
- u8 api_minor;
- u8 api_patch;
-};
-
-/* Send driver version (indirect 0x0002) */
-struct ice_aqc_driver_ver {
- u8 major_ver;
- u8 minor_ver;
- u8 build_ver;
- u8 subbuild_ver;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
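For reference, the size relationship spelled out above can be sanity-checked at compile time; these asserts are illustrative only and not part of the patch:

static_assert(sizeof(ice_txq_ctx_buf_t) == ICE_TXQ_CTX_SZ);
static_assert(sizeof(ice_txq_ctx_buf_full_t) == ICE_TXQ_CTX_FULL_SZ);
static_assert(ICE_TXQ_CTX_SZ < ICE_TXQ_CTX_FULL_SZ); /* 22-byte AQ view vs. 40-byte full context */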
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
@@ -58,94 +40,6 @@ struct ice_aqc_q_shutdown {
u8 reserved[15];
};
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct ice_aqc_req_res {
- __le16 res_id;
-#define ICE_AQC_RES_ID_NVM 1
-#define ICE_AQC_RES_ID_SDP 2
-#define ICE_AQC_RES_ID_CHNG_LOCK 3
-#define ICE_AQC_RES_ID_GLBL_LOCK 4
- __le16 access_type;
-#define ICE_AQC_RES_ACCESS_READ 1
-#define ICE_AQC_RES_ACCESS_WRITE 2
-
- /* Upon successful completion, FW writes this value and driver is
- * expected to release resource before timeout. This value is provided
- * in milliseconds.
- */
- __le32 timeout;
-#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
-#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
-#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
-#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin ID of the SDP */
- __le32 res_number;
- /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
- __le16 status;
-#define ICE_AQ_RES_GLBL_SUCCESS 0
-#define ICE_AQ_RES_GLBL_IN_PROG 1
-#define ICE_AQ_RES_GLBL_DONE 2
- u8 reserved[2];
-};
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct ice_aqc_list_caps {
- u8 cmd_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Device/Function buffer entry, repeated per reported capability */
-struct ice_aqc_list_caps_elem {
- __le16 cap;
-#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
-#define ICE_AQC_CAPS_SRIOV 0x0012
-#define ICE_AQC_CAPS_VF 0x0013
-#define ICE_AQC_CAPS_VSI 0x0017
-#define ICE_AQC_CAPS_DCB 0x0018
-#define ICE_AQC_CAPS_RSS 0x0040
-#define ICE_AQC_CAPS_RXQS 0x0041
-#define ICE_AQC_CAPS_TXQS 0x0042
-#define ICE_AQC_CAPS_MSIX 0x0043
-#define ICE_AQC_CAPS_FD 0x0045
-#define ICE_AQC_CAPS_1588 0x0046
-#define ICE_AQC_CAPS_MAX_MTU 0x0047
-#define ICE_AQC_CAPS_NVM_VER 0x0048
-#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
-#define ICE_AQC_CAPS_OROM_VER 0x004A
-#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
-#define ICE_AQC_CAPS_NET_VER 0x004C
-#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
-#define ICE_AQC_CAPS_RDMA 0x0051
-#define ICE_AQC_CAPS_SENSOR_READING 0x0067
-#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
-#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
-#define ICE_AQC_CAPS_NVM_MGMT 0x0080
-#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
-#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
-#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
-#define ICE_AQC_BIT_ROCEV2_LAG 0x01
-#define ICE_AQC_BIT_SRIOV_LAG 0x02
-
- u8 major_ver;
- u8 minor_ver;
- /* Number of resources described by this capability */
- __le32 number;
- /* Only meaningful for some types of resources */
- __le32 logical_id;
- /* Only meaningful for some types of resources */
- __le32 phys_id;
- __le64 rsvd1;
- __le64 rsvd2;
-};
-
/* Manage MAC address, read command - indirect (0x0107)
* This struct is also used for the response
*/
@@ -1672,6 +1566,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
+#define ICE_AQC_PORT_OPT_MAX_LANE_40G 9
u8 global_scid[2];
u8 phy_scid[2];
@@ -2272,6 +2167,22 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_CGU_INPUT_PHASE_OFFSET_BYTES 6
+
+struct ice_cgu_input_measure {
+ u8 phase_offset[ICE_CGU_INPUT_PHASE_OFFSET_BYTES];
+ __le32 freq;
+} __packed __aligned(sizeof(__le16));
+
+#define ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M ICE_M(0xf, 0)
+
+/* Get CGU input measure command response data structure (indirect 0x0C59) */
+struct ice_aqc_get_cgu_input_measure {
+ u8 dpll_idx_opt;
+ u8 length;
+ u8 rsvd[6];
+};
+
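Illustrative compile-time checks of the new layouts (not part of the patch; the sizes follow directly from the definitions above):

static_assert(sizeof(struct ice_cgu_input_measure) == 10);        /* packed 6 + 4 bytes */
static_assert(sizeof(struct ice_aqc_get_cgu_input_measure) == 8); /* fits the 16-byte direct params */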
#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
/* Get CGU abilities command response data structure (indirect 0x0C61) */
@@ -2288,6 +2199,8 @@ struct ice_aqc_get_cgu_abilities {
u8 rsvd[3];
};
+#define ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN BIT(7)
+
/* Set CGU input config (direct 0x0C62) */
struct ice_aqc_set_cgu_input_config {
u8 input_idx;
@@ -2641,151 +2554,6 @@ struct ice_aqc_fw_log_cfg_resp {
u8 rsvd0;
};
-/**
- * struct ice_aq_desc - Admin Queue (AQ) descriptor
- * @flags: ICE_AQ_FLAG_* flags
- * @opcode: AQ command opcode
- * @datalen: length in bytes of indirect/external data buffer
- * @retval: return value from firmware
- * @cookie_high: opaque data high-half
- * @cookie_low: opaque data low-half
- * @params: command-specific parameters
- *
- * Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
- * result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
- * 32-bit words.
- */
-struct ice_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- u8 raw[16];
- struct ice_aqc_generic generic;
- struct ice_aqc_get_ver get_ver;
- struct ice_aqc_driver_ver driver_ver;
- struct ice_aqc_q_shutdown q_shutdown;
- struct ice_aqc_req_res res_owner;
- struct ice_aqc_manage_mac_read mac_read;
- struct ice_aqc_manage_mac_write mac_write;
- struct ice_aqc_clear_pxe clear_pxe;
- struct ice_aqc_list_caps get_cap;
- struct ice_aqc_get_phy_caps get_phy;
- struct ice_aqc_set_phy_cfg set_phy;
- struct ice_aqc_restart_an restart_an;
- struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
- struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
- struct ice_aqc_get_sensor_reading get_sensor_reading;
- struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
- struct ice_aqc_gpio read_write_gpio;
- struct ice_aqc_sff_eeprom read_write_sff_param;
- struct ice_aqc_set_port_id_led set_port_id_led;
- struct ice_aqc_get_port_options get_port_options;
- struct ice_aqc_set_port_option set_port_option;
- struct ice_aqc_get_sw_cfg get_sw_conf;
- struct ice_aqc_set_port_params set_port_params;
- struct ice_aqc_sw_rules sw_rules;
- struct ice_aqc_add_get_recipe add_get_recipe;
- struct ice_aqc_recipe_to_profile recipe_to_profile;
- struct ice_aqc_get_topo get_topo;
- struct ice_aqc_sched_elem_cmd sched_elem_cmd;
- struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_query_port_ets port_ets;
- struct ice_aqc_rl_profile rl_profile;
- struct ice_aqc_nvm nvm;
- struct ice_aqc_nvm_checksum nvm_checksum;
- struct ice_aqc_nvm_pkg_data pkg_data;
- struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
- struct ice_aqc_pf_vf_msg virt;
- struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
- struct ice_aqc_lldp_get_mib lldp_get_mib;
- struct ice_aqc_lldp_set_mib_change lldp_set_event;
- struct ice_aqc_lldp_stop lldp_stop;
- struct ice_aqc_lldp_start lldp_start;
- struct ice_aqc_lldp_set_local_mib lldp_set_mib;
- struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
- struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
- struct ice_aqc_get_set_rss_lut get_set_rss_lut;
- struct ice_aqc_get_set_rss_key get_set_rss_key;
- struct ice_aqc_neigh_dev_req neigh_dev;
- struct ice_aqc_add_txqs add_txqs;
- struct ice_aqc_dis_txqs dis_txqs;
- struct ice_aqc_cfg_txqs cfg_txqs;
- struct ice_aqc_add_rdma_qset add_rdma_qset;
- struct ice_aqc_add_get_update_free_vsi vsi_cmd;
- struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_download_pkg download_pkg;
- struct ice_aqc_set_cgu_input_config set_cgu_input_config;
- struct ice_aqc_get_cgu_input_config get_cgu_input_config;
- struct ice_aqc_set_cgu_output_config set_cgu_output_config;
- struct ice_aqc_get_cgu_output_config get_cgu_output_config;
- struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
- struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
- struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
- struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
- struct ice_aqc_get_cgu_info get_cgu_info;
- struct ice_aqc_driver_shared_params drv_shared_params;
- struct ice_aqc_fw_log fw_log;
- struct ice_aqc_set_mac_lb set_mac_lb;
- struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
- struct ice_aqc_set_mac_cfg set_mac_cfg;
- struct ice_aqc_set_event_mask set_event_mask;
- struct ice_aqc_get_link_status get_link_status;
- struct ice_aqc_event_lan_overflow lan_overflow;
- struct ice_aqc_get_link_topo get_link_topo;
- struct ice_aqc_set_health_status_cfg set_health_status_cfg;
- struct ice_aqc_get_health_status get_health_status;
- struct ice_aqc_dnl_call_command dnl_call;
- struct ice_aqc_i2c read_write_i2c;
- struct ice_aqc_read_i2c_resp read_i2c_resp;
- struct ice_aqc_get_set_tx_topo get_set_tx_topo;
- } params;
-};
-
-/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
-#define ICE_AQ_LG_BUF 512
-
-#define ICE_AQ_FLAG_DD_S 0
-#define ICE_AQ_FLAG_CMP_S 1
-#define ICE_AQ_FLAG_ERR_S 2
-#define ICE_AQ_FLAG_LB_S 9
-#define ICE_AQ_FLAG_RD_S 10
-#define ICE_AQ_FLAG_BUF_S 12
-#define ICE_AQ_FLAG_SI_S 13
-
-#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
-#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
-#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
-#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
-#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
-#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
-#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
-
-/* error codes */
-enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* Success */
- ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
- ICE_AQ_RC_ENOENT = 2, /* No such element */
- ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
- ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* Object already exists */
- ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
- ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
- ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
- ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
- ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
- ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
- ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
- ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
-};
-
/* Admin Queue command opcodes */
enum ice_adminq_opc {
/* AQ commands */
@@ -2927,6 +2695,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_input_measure = 0x0C59,
ice_aqc_opc_get_cgu_abilities = 0x0C61,
ice_aqc_opc_set_cgu_input_config = 0x0C62,
ice_aqc_opc_get_cgu_input_config = 0x0C63,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 6db4ad8fc70b..c5da8e9cc0a0 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -250,7 +250,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
return ring->q_index - ring->ch->base_q;
/* Idea here for calculation is that we subtract the number of queue
- * count from TC that ring belongs to from it's absolute queue index
+ * count from TC that ring belongs to from its absolute queue index
* and as a result we get the queue's index within TC.
*/
return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
@@ -623,7 +623,10 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ice_alloc_rx_bufs(ring, num_bufs);
+ if (ring->vsi->type == ICE_VSI_CTRL)
+ ice_init_ctrl_rx_descs(ring, num_bufs);
+ else
+ ice_alloc_rx_bufs(ring, num_bufs);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
deleted file mode 100644
index 10d9d74f3545..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
-
-#ifndef _ICE_CGU_REGS_H_
-#define _ICE_CGU_REGS_H_
-
-#define NAC_CGU_DWORD9 0x24
-union nac_cgu_dword9 {
- struct {
- u32 time_ref_freq_sel : 3;
- u32 clk_eref1_en : 1;
- u32 clk_eref0_en : 1;
- u32 time_ref_en : 1;
- u32 time_sync_en : 1;
- u32 one_pps_out_en : 1;
- u32 clk_ref_synce_en : 1;
- u32 clk_synce1_en : 1;
- u32 clk_synce0_en : 1;
- u32 net_clk_ref1_en : 1;
- u32 net_clk_ref0_en : 1;
- u32 clk_synce1_amp : 2;
- u32 misc6 : 1;
- u32 clk_synce0_amp : 2;
- u32 one_pps_out_amp : 2;
- u32 misc24 : 12;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD16_E825C 0x40
-union nac_cgu_dword16_e825c {
- struct {
- u32 synce_remndr : 6;
- u32 synce_phlmt_en : 1;
- u32 misc13 : 17;
- u32 tspll_ck_refclkfreq : 8;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD19 0x4c
-union nac_cgu_dword19 {
- struct {
- u32 tspll_fbdiv_intgr : 8;
- u32 fdpll_ulck_thr : 5;
- u32 misc15 : 3;
- u32 tspll_ndivratio : 4;
- u32 tspll_iref_ndivratio : 3;
- u32 misc19 : 1;
- u32 japll_ndivratio : 4;
- u32 japll_iref_ndivratio : 3;
- u32 misc27 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD22 0x58
-union nac_cgu_dword22 {
- struct {
- u32 fdpll_frac_div_out_nc : 2;
- u32 fdpll_lock_int_for : 1;
- u32 synce_hdov_int_for : 1;
- u32 synce_lock_int_for : 1;
- u32 fdpll_phlead_slip_nc : 1;
- u32 fdpll_acc1_ovfl_nc : 1;
- u32 fdpll_acc2_ovfl_nc : 1;
- u32 synce_status_nc : 6;
- u32 fdpll_acc1f_ovfl : 1;
- u32 misc18 : 1;
- u32 fdpllclk_div : 4;
- u32 time1588clk_div : 4;
- u32 synceclk_div : 4;
- u32 synceclk_sel_div2 : 1;
- u32 fdpllclk_sel_div2 : 1;
- u32 time1588clk_sel_div2 : 1;
- u32 misc3 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD23_E825C 0x5C
-union nac_cgu_dword23_e825c {
- struct {
- u32 cgupll_fbdiv_intgr : 10;
- u32 ux56pll_fbdiv_intgr : 10;
- u32 misc20 : 4;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
-
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD24 0x60
-union nac_cgu_dword24 {
- struct {
- u32 tspll_fbdiv_frac : 22;
- u32 misc20 : 2;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
- };
- u32 val;
-};
-
-#define TSPLL_CNTR_BIST_SETTINGS 0x344
-union tspll_cntr_bist_settings {
- struct {
- u32 i_irefgen_settling_time_cntr_7_0 : 8;
- u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
- u32 reserved195 : 5;
- u32 i_plllock_sel_0 : 1;
- u32 i_plllock_sel_1 : 1;
- u32 i_plllock_cnt_6_0 : 7;
- u32 i_plllock_cnt_10_7 : 4;
- u32 reserved200 : 4;
- };
- u32 val;
-};
-
-#define TSPLL_RO_BWM_LF 0x370
-union tspll_ro_bwm_lf {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 biascaldone_cri : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 m2fbdivmod_cri_7_0 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_RO_LOCK_E825C 0x3f0
-union tspll_ro_lock_e825c {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 reserved455 : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 reserved462 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_BW_TDC_E825C 0x31c
-union tspll_bw_tdc_e825c {
- struct {
- u32 i_tdc_offset_lock_1_0 : 2;
- u32 i_bbthresh1_2_0 : 3;
- u32 i_bbthresh2_2_0 : 3;
- u32 i_tdcsel_1_0 : 2;
- u32 i_tdcovccorr_en_h : 1;
- u32 i_divretimeren : 1;
- u32 i_bw_ampmeas_window : 1;
- u32 i_bw_lowerbound_2_0 : 3;
- u32 i_bw_upperbound_2_0 : 3;
- u32 i_bw_mode_1_0 : 2;
- u32 i_ft_mode_sel_2_0 : 3;
- u32 i_bwphase_4_0 : 5;
- u32 i_plllock_sel_1_0 : 2;
- u32 i_afc_divratio : 1;
- };
- u32 val;
-};
-
-#endif /* _ICE_CGU_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4fedf0181c4e..003d60a4db21 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -171,6 +171,15 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E830_XXV_QSFP:
case ICE_DEV_ID_E830C_SFP:
case ICE_DEV_ID_E830_XXV_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -239,7 +248,7 @@ static bool ice_is_pf_c827(struct ice_hw *hw)
*/
int ice_clear_pf_cfg(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
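The conversion above repeats throughout the file: commands are now built on the shared struct libie_aq_desc, and typed parameters are obtained via libie_aq_raw() instead of the removed ice_aq_desc union. A minimal sketch of the pattern, assuming libie_aq_raw() exposes the descriptor's 16-byte parameter area as the hunks below suggest; the parameter struct here is hypothetical, real commands use their ice_aqc_* layouts:

/* Hypothetical direct-command parameter layout, for illustration only */
struct ice_example_cmd_params {
        __le32 value;
        u8 reserved[12];
};

static int ice_example_send_direct_cmd(struct ice_hw *hw, u16 opcode, u32 value)
{
        struct ice_example_cmd_params *cmd;
        struct libie_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, opcode);
        cmd = libie_aq_raw(&desc);      /* typed view of the parameter area */
        cmd->value = cpu_to_le32(value);

        return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}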
@@ -267,12 +276,12 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u16 flags;
u8 i;
- cmd = &desc.params.mac_read;
+ cmd = libie_aq_raw(&desc);
if (buf_size < sizeof(*resp))
return -EINVAL;
@@ -321,12 +330,12 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
{
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
const char *prefix;
struct ice_hw *hw;
int status;
- cmd = &desc.params.get_phy;
+ cmd = libie_aq_raw(&desc);
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return -EINVAL;
@@ -415,9 +424,9 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.get_link_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
@@ -445,19 +454,20 @@ int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_get_link_topo *resp;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
return -EINTR;
if (node_handle)
- *node_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *node_handle = le16_to_cpu(resp->addr.handle);
if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ *node_part_number = resp->node_part_num;
return 0;
}
@@ -680,8 +690,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
- struct ice_aq_desc desc;
struct ice_hw *hw;
u16 cmd_flags;
int status;
@@ -696,7 +706,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
- resp = &desc.params.get_link_status;
+ resp = libie_aq_raw(&desc);
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
@@ -825,9 +835,9 @@ int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_cfg;
+ cmd = libie_aq_raw(&desc);
if (max_frame_size == 0)
return -EINVAL;
@@ -1342,6 +1352,26 @@ static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
}
}
+/**
+ * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @rxq_ctx: pointer to the packed Rx queue context
+ * @rxq_index: the index of the Rx queue
+ */
+static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
+ ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
+{
+ u32 *ctx = (u32 *)rxq_ctx;
+
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
+ }
+}
+
#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
@@ -1386,6 +1416,21 @@ static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
}
/**
+ * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Rx queue context to unpack
+ *
+ * Unpack the Rx queue context from the HW buffer into the CPU-friendly
+ * structure.
+ */
+static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
+ struct ice_rlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
* ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the unpacked Rx queue context
@@ -1410,6 +1455,31 @@ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
return 0;
}
+/**
+ * ice_read_rxq_ctx - Read Rx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the Rx queue context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Read the Rx queue context from the hardware registers, and unpack it into
+ * the sparse Rx queue context structure.
+ *
+ * Returns: 0 on success, or -EINVAL if the Rx queue index is invalid.
+ */
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ ice_rxq_ctx_buf_t buf = {};
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index);
+ ice_unpack_rxq_ctx(&buf, rlan_ctx);
+
+ return 0;
+}
+
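A short usage sketch for the new read path; the base/qlen field names are taken from the existing struct ice_rlan_ctx and the snippet is illustrative only:

/* Sketch: dump a live Rx queue context for debugging */
static void ice_example_dump_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
        struct ice_rlan_ctx ctx = {};

        if (ice_read_rxq_ctx(hw, &ctx, rxq_index))
                return;

        ice_debug(hw, ICE_DBG_QCTX, "rxq %u: base=0x%llx qlen=%u\n",
                  rxq_index, (unsigned long long)ctx.base, ctx.qlen);
}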
/* LAN Tx Queue Context */
static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
@@ -1443,12 +1513,12 @@ static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
};
/**
- * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
* @ctx: the Tx queue context to pack
- * @buf: the HW buffer to pack into
+ * @buf: the Admin Queue HW buffer to pack into
*
* Pack the Tx queue context from the CPU-friendly unpacked buffer into its
- * bit-packed HW layout.
+ * bit-packed Admin Queue layout.
*/
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
@@ -1456,6 +1526,173 @@ void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
+/**
+ * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout, including the internal data portion.
+ */
+static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
+ ice_txq_ctx_buf_full_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Tx queue context to unpack
+ *
+ * Unpack the Tx queue context from the HW buffer (including the full internal
+ * state) into the CPU-friendly structure.
+ */
+static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
+ struct ice_tlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ *
+ * Copy Tx Queue context from HW register space to a dense structure.
+ */
+static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
+ ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 *ctx = (u32 *)txq_ctx;
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_READ) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
+ }
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ */
+static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
+ const ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)txq_ctx)[i];
+
+ wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
+ }
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_read_txq_ctx - Read Tx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Read the Tx queue context from the HW registers, then unpack it into the
+ * ice_tlan_ctx structure for use.
+ *
+ * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
+ ice_unpack_txq_ctx_full(&buf, tlan_ctx);
+
+ return 0;
+}
+
+/**
+ * ice_write_txq_ctx - Write Tx queue context to HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Pack the Tx queue context into the dense HW layout, then write it into the
+ * HW registers.
+ *
+ * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_txq_ctx_full(tlan_ctx, &buf);
+ ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
+
+ return 0;
+}
+
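Together these helpers allow a full read-modify-write of a Tx queue context, which is what a caller migrating or restoring a queue would do. A minimal sketch using only the functions added above (illustrative, not part of the patch):

static int ice_example_rewrite_txq_ctx(struct ice_hw *hw, u32 txq_index)
{
        struct ice_tlan_ctx ctx = {};
        int err;

        err = ice_read_txq_ctx(hw, &ctx, txq_index);
        if (err)
                return err;

        /* ... adjust ice_tlan_ctx fields here before restoring ... */

        return ice_write_txq_ctx(hw, &ctx, txq_index);
}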
/* Sideband Queue command wrappers */
/**
@@ -1471,7 +1708,7 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc, buf, buf_size, cd);
+ (struct libie_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1556,10 +1793,10 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
*/
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc_cpy;
+ struct libie_aq_desc desc_cpy;
bool is_cmd_for_retry;
u8 idx = 0;
u16 opcode;
@@ -1580,7 +1817,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
if (!is_cmd_for_retry || !status ||
- hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
+ hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY)
break;
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
@@ -1603,10 +1840,10 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd = &desc->params.res_owner;
+ struct libie_aqc_req_res *cmd = libie_aq_raw(desc);
bool lock_acquired = false;
int status;
@@ -1637,7 +1874,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
- if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
+ if (le16_to_cpu(cmd->res_id) == LIBIE_AQC_RES_ID_GLBL_LOCK)
break;
fallthrough;
default:
@@ -1662,8 +1899,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*/
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
- struct ice_aqc_get_ver *resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
int status;
resp = &desc.params.get_ver;
@@ -1699,8 +1936,8 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
- struct ice_aqc_driver_ver *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
u16 len;
cmd = &desc.params.driver_ver;
@@ -1710,7 +1947,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->major_ver = dv->major_ver;
cmd->minor_ver = dv->minor_ver;
cmd->build_ver = dv->build_ver;
@@ -1735,9 +1972,9 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.q_shutdown;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
@@ -1778,8 +2015,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd_resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
cmd_resp = &desc.params.res_owner;
@@ -1801,20 +2038,20 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* Global config lock response utilizes an additional status field.
*
* If the Global config lock resource is held by some other driver, the
- * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * command completes with LIBIE_AQ_RES_GLBL_IN_PROG in the status field
* and the timeout field indicates the maximum time the current owner
* of the resource has to free it.
*/
if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
- if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ if (le16_to_cpu(cmd_resp->status) == LIBIE_AQ_RES_GLBL_SUCCESS) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return 0;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_IN_PROG) {
+ LIBIE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_DONE) {
+ LIBIE_AQ_RES_GLBL_DONE) {
return -EALREADY;
}
@@ -1827,7 +2064,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
* with a busy return value and the timeout field indicates the maximum
* time the current owner of the resource has to free it.
*/
- if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ if (!status || hw->adminq.sq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -1846,8 +2083,8 @@ static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
cmd = &desc.params.res_owner;
@@ -1956,16 +2193,16 @@ int ice_aq_alloc_free_res(struct ice_hw *hw,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.sw_res_ctrl;
+ cmd = libie_aq_raw(&desc);
if (!buf || buf_size < flex_array_size(buf, elem, 1))
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_entries = cpu_to_le16(1);
@@ -2079,7 +2316,7 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
*/
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
- struct ice_aqc_list_caps_elem *elem, const char *prefix)
+ struct libie_aqc_list_caps_elem *elem, const char *prefix)
{
u32 logical_id = le32_to_cpu(elem->logical_id);
u32 phys_id = le32_to_cpu(elem->phys_id);
@@ -2088,17 +2325,17 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
bool found = true;
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
break;
- case ICE_AQC_CAPS_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
- case ICE_AQC_CAPS_DCB:
+ case LIBIE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
@@ -2107,7 +2344,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->active_tc_bitmap);
ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
break;
- case ICE_AQC_CAPS_RSS:
+ case LIBIE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
@@ -2115,7 +2352,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
caps->rss_table_entry_width);
break;
- case ICE_AQC_CAPS_RXQS:
+ case LIBIE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
@@ -2123,7 +2360,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
caps->rxq_first_id);
break;
- case ICE_AQC_CAPS_TXQS:
+ case LIBIE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
@@ -2131,7 +2368,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
caps->txq_first_id);
break;
- case ICE_AQC_CAPS_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
@@ -2139,56 +2376,56 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
- case ICE_AQC_CAPS_PENDING_NVM_VER:
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
caps->nvm_update_pending_nvm = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_OROM_VER:
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
caps->nvm_update_pending_orom = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_NET_VER:
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
caps->nvm_update_pending_netlist = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
break;
- case ICE_AQC_CAPS_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
caps->nvm_unified_update =
(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
- case ICE_AQC_CAPS_RDMA:
+ case LIBIE_AQC_CAPS_RDMA:
if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
- case ICE_AQC_CAPS_MAX_MTU:
+ case LIBIE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
- case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
caps->pcie_reset_avoidance = (number > 0);
ice_debug(hw, ICE_DBG_INIT,
"%s: pcie_reset_avoidance = %d\n", prefix,
caps->pcie_reset_avoidance);
break;
- case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
caps->reset_restrict_support = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
- case ICE_AQC_CAPS_FW_LAG_SUPPORT:
- caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = !!(number & LIBIE_AQC_BIT_ROCEV2_LAG);
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
- caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ caps->sriov_lag = !!(number & LIBIE_AQC_BIT_SRIOV_LAG);
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
break;
- case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
break;
default:
@@ -2242,7 +2479,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
*/
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 logical_id = le32_to_cpu(cap->logical_id);
u32 number = le32_to_cpu(cap->number);
@@ -2265,7 +2502,7 @@ ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
@@ -2284,7 +2521,7 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_func_info *info = &func_p->ts_func_info;
u32 number = le32_to_cpu(cap->number);
@@ -2301,12 +2538,12 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
- info->clk_freq = ICE_TIME_REF_FREQ_156_250;
- info->clk_src = ICE_CLK_SRC_TCXO;
+ info->clk_freq = ICE_TSPLL_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TIME_REF;
}
- if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
- info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ if (info->clk_freq < NUM_ICE_TSPLL_FREQ) {
+ info->time_ref = (enum ice_tspll_freq)info->clk_freq;
} else {
/* Unknown clock frequency, so assume a (probably incorrect)
* default to avoid out-of-bounds look ups of frequency
@@ -2314,7 +2551,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
info->clk_freq);
- info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ info->time_ref = ICE_TSPLL_FREQ_25_000;
}
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
@@ -2383,7 +2620,7 @@ static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2398,16 +2635,16 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
&cap_resp[i], "func caps");
switch (cap) {
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
default:
@@ -2451,7 +2688,7 @@ static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
*/
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2472,7 +2709,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2491,7 +2728,7 @@ ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2510,7 +2747,7 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
u32 logical_id = le32_to_cpu(cap->logical_id);
@@ -2571,7 +2808,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2591,7 +2828,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->supported_sensors = le32_to_cpu(cap->number);
@@ -2610,7 +2847,7 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->nac_topo.mode = le32_to_cpu(cap->number);
dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
@@ -2646,7 +2883,7 @@ static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2661,25 +2898,25 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
&cap_resp[i], "dev caps");
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_SENSOR_READING:
+ case LIBIE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ case LIBIE_AQC_CAPS_NAC_TOPOLOGY:
ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
@@ -2799,8 +3036,8 @@ int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aqc_list_caps *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status;
cmd = &desc.params.get_cap;
@@ -2841,7 +3078,7 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_dev_caps, NULL);
@@ -2875,7 +3112,7 @@ ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_func_caps, NULL);
@@ -2984,9 +3221,9 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.mac_write;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
@@ -3003,10 +3240,12 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*/
static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
- desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+ cmd->rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -3039,10 +3278,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
{
struct ice_aqc_set_port_params *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 cmd_flags = 0;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
if (double_vlan)
@@ -3279,7 +3518,8 @@ int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
int status;
if (!cfg)
@@ -3294,8 +3534,9 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = pi->lport;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ cmd->lport_num = pi->lport;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
@@ -3311,7 +3552,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
status = 0;
if (!status)
@@ -3368,17 +3609,17 @@ int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
{
struct ice_aqc_dnl_call_command *cmd;
struct ice_aqc_dnl_call buf = {};
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int err;
buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
buf.sto.txrx_equa_reqs.op_code_serdes_sel =
cpu_to_le16(op_code | (serdes_num & 0xF));
- cmd = &desc.params.dnl_call;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
- ICE_AQ_FLAG_RD |
- ICE_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD |
+ LIBIE_AQ_FLAG_SI);
desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
@@ -3416,7 +3657,7 @@ static const u32 fec_reg[][ICE_FEC_MAX] = {
int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
enum ice_fec_stats_types fec_type, u32 *output)
{
- u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ u16 flag = (LIBIE_AQ_FLAG_RD | LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_SI);
struct ice_sbq_msg_input msg = {};
u32 receiver_id, reg_offset;
int err;
@@ -3839,9 +4080,9 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
struct ice_aqc_restart_an *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.restart_an;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
@@ -3869,9 +4110,9 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_event_mask;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
@@ -3893,9 +4134,9 @@ int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_lb;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
if (ena_lpbk)
@@ -3918,9 +4159,9 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
{
struct ice_aqc_set_port_id_led *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_port_id_led;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
@@ -3956,7 +4197,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
u8 *pending_option_idx, bool *pending_option_valid)
{
struct ice_aqc_get_port_options *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u8 i;
@@ -3964,7 +4205,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.get_port_options;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
if (lport_valid)
@@ -4030,12 +4271,12 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
struct ice_aqc_set_port_option *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.set_port_option;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
if (lport_valid)
@@ -4080,7 +4321,7 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
speed = options[active_idx].max_lane_speed;
/* If we don't get speed for this lane, it's unoccupied */
- if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
continue;
if (hw->pf_id == lport) {
@@ -4121,7 +4362,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i2c_bus_addr;
int status;
@@ -4129,8 +4370,8 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
- cmd = &desc.params.read_write_sff_param;
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
@@ -4190,7 +4431,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
struct ice_aqc_get_set_rss_lut *desc_params;
enum ice_aqc_lut_flags flags;
enum ice_lut_size lut_size;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *lut = params->lut;
@@ -4206,9 +4447,9 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
if (set)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- desc_params = &desc.params.get_set_rss_lut;
+ desc_params = libie_aq_raw(&desc);
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
@@ -4263,16 +4504,16 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
{
struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- desc_params = &desc.params.get_set_rss_key;
+ desc_params = libie_aq_raw(&desc);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
@@ -4344,10 +4585,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
@@ -4366,7 +4607,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qgrps = num_qgrps;
@@ -4393,12 +4634,12 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
- cmd = &desc.params.dis_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
@@ -4439,7 +4680,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
/* set RD bit to indicate that command buffer is provided by the driver
* and it needs to be read by the firmware
*/
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
for (i = 0, item = qg_list; i < num_qgrps; i++) {
u16 item_size = struct_size(item, q_id, item->num_qs);
@@ -4491,12 +4732,12 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.cfg_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (!buf)
return -EINVAL;
@@ -4532,10 +4773,10 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
{
struct ice_aqc_add_rdma_qset_data *list;
struct ice_aqc_add_rdma_qset *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_rdma_qset;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
@@ -4553,7 +4794,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qset_grps = num_qset_grps;
@@ -4971,6 +5212,32 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
/**
+ * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements
+ * @hw: pointer to the HW struct
+ * @dpll_idx: index of dpll to be measured
+ * @meas: array to be filled with results
+ * @meas_num: max number of results the array can hold
+ *
+ * Get CGU measurements (0x0C59) of phase and frequency offsets for input
+ * pins on a given dpll.
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num)
+{
+ struct ice_aqc_get_cgu_input_measure *cmd;
+ struct libie_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure);
+ cmd = libie_aq_raw(&desc);
+ cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M;
+
+ return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL);
+}
+
+/**
* ice_aq_get_cgu_abilities - get cgu abilities
* @hw: pointer to the HW struct
* @abilities: CGU abilities
@@ -4982,7 +5249,7 @@ int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
@@ -5005,10 +5272,10 @@ ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
- cmd = &desc.params.set_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
cmd->flags1 = flags1;
cmd->flags2 = flags2;
@@ -5037,11 +5304,11 @@ ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
struct ice_aqc_get_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
- cmd = &desc.params.get_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5080,10 +5347,10 @@ ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
u8 src_sel, u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
- cmd = &desc.params.set_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
cmd->flags = flags;
cmd->src_sel = src_sel;
@@ -5110,11 +5377,11 @@ ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
u8 *src_sel, u32 *freq, u32 *src_freq)
{
struct ice_aqc_get_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
- cmd = &desc.params.get_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5151,11 +5418,11 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
- cmd = &desc.params.get_cgu_dpll_status;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5189,10 +5456,10 @@ ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
u8 config, u8 eec_mode)
{
struct ice_aqc_set_cgu_dpll_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
- cmd = &desc.params.set_cgu_dpll_config;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_state = ref_state;
cmd->config = config;
@@ -5216,10 +5483,10 @@ ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 ref_priority)
{
struct ice_aqc_set_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
- cmd = &desc.params.set_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
cmd->ref_priority = ref_priority;
@@ -5242,11 +5509,11 @@ ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 *ref_prio)
{
struct ice_aqc_get_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
- cmd = &desc.params.get_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
@@ -5272,11 +5539,11 @@ ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
u32 *cgu_fw_ver)
{
struct ice_aqc_get_cgu_info *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
- cmd = &desc.params.get_cgu_info;
+ cmd = libie_aq_raw(&desc);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status) {
@@ -5303,11 +5570,11 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
u32 *freq)
{
struct ice_aqc_set_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
- cmd = &desc.params.set_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = phy_output;
cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
@@ -5336,11 +5603,11 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle)
{
struct ice_aqc_get_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
- cmd = &desc.params.get_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = *phy_output;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5368,11 +5635,11 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
struct ice_aqc_get_sensor_reading_resp *data)
{
struct ice_aqc_get_sensor_reading *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
- cmd = &desc.params.get_sensor_reading;
+ cmd = libie_aq_raw(&desc);
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
#define ICE_INTERNAL_TEMP_SENSOR 0
cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
@@ -5380,7 +5647,7 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status)
- memcpy(data, &desc.params.get_sensor_reading_resp,
+ memcpy(data, &desc.params.raw,
sizeof(*data));
return status;
@@ -5577,13 +5844,13 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
if (!data)
return -EINVAL;
@@ -5600,7 +5867,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_aqc_read_i2c_resp *resp;
u8 i;
- resp = &desc.params.read_i2c_resp;
+ resp = libie_aq_raw(&desc);
for (i = 0; i < data_size; i++) {
*data = resp->i2c_data[i];
data++;
@@ -5632,12 +5899,12 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
@@ -5669,7 +5936,7 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
{
struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int err;
u8 idx;
@@ -5692,7 +5959,7 @@ int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
/* If handle was not detected read it from the netlist */
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- cmd = &desc.params.get_link_topo;
+ cmd = libie_aq_raw(&desc);
cmd->addr.topo_params.node_type_ctx =
ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
cmd->addr.topo_params.index = idx;
@@ -5702,13 +5969,12 @@ int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
return -ENXIO;
/* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ if (cmd->node_part_num != ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
return -ENXIO;
/* If present save the handle and return it */
hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ le16_to_cpu(cmd->addr.handle);
*pca9575_handle = hw->io_expander_handle;
return 0;
@@ -5759,11 +6025,11 @@ int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@@ -5786,12 +6052,12 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
@@ -5954,9 +6220,9 @@ bool ice_is_fw_health_report_supported(struct ice_hw *hw)
int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
{
struct ice_aqc_set_health_status_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_health_status_cfg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
@@ -5980,16 +6246,16 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
desc.datalen = cpu_to_le16(buf_size);
cmd->type = mib_type;
@@ -6025,12 +6291,12 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
return -EOPNOTSUPP;
- cmd = &desc.params.lldp_filter_ctrl;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -6050,7 +6316,7 @@ int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
*/
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
@@ -6106,3 +6372,64 @@ u32 ice_get_link_speed(u16 index)
return ice_aq_to_link_speed[index];
}
+
+/**
+ * ice_read_cgu_reg - Read a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to read
+ * @val: Storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to read from CGU.
+ */
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_rd,
+ .dest_dev = ice_sbq_dev_cgu,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg - Write a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to write
+ * @val: Value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to write to CGU.
+ */
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_wr,
+ .dest_dev = ice_sbq_dev_cgu,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+
+ return err;
+}
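
Illustrative sketch, not part of the patch: a hypothetical caller of the new 0x0C59 wrapper added above could look like the following. The caller name and the fixed array size are assumptions (the real input-reference count comes from ICE_DPLL_INPUT_REF_NUM in ice_dpll.c later in this series); the wrapper signature is taken from the hunk above.

/* Hypothetical caller of ice_aq_get_cgu_input_pin_measure(). */
static int ice_example_measure_dpll_inputs(struct ice_hw *hw, u8 dpll_idx)
{
	struct ice_cgu_input_measure meas[10] = {};
	int err;

	err = ice_aq_get_cgu_input_pin_measure(hw, dpll_idx, meas,
					       ARRAY_SIZE(meas));
	if (err)
		return err;

	/* meas[] now holds phase and frequency offsets per input pin */
	return 0;
}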
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 64c530b39191..60320cdf7804 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -39,6 +39,47 @@
#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+#define ICE_CGU_R9 0x24
+#define ICE_CGU_R9_TIME_REF_FREQ_SEL GENMASK(2, 0)
+#define ICE_CGU_R9_CLK_EREF0_EN BIT(4)
+#define ICE_CGU_R9_TIME_REF_EN BIT(5)
+#define ICE_CGU_R9_TIME_SYNC_EN BIT(6)
+#define ICE_CGU_R9_ONE_PPS_OUT_EN BIT(7)
+#define ICE_CGU_R9_ONE_PPS_OUT_AMP GENMASK(19, 18)
+
+#define ICE_CGU_R16 0x40
+#define ICE_CGU_R16_TSPLL_CK_REFCLKFREQ GENMASK(31, 24)
+
+#define ICE_CGU_R19 0x4C
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X GENMASK(7, 0)
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 GENMASK(9, 0)
+#define ICE_CGU_R19_TSPLL_NDIVRATIO GENMASK(19, 16)
+
+#define ICE_CGU_R22 0x58
+#define ICE_CGU_R22_TIME1588CLK_DIV GENMASK(23, 20)
+#define ICE_CGU_R22_TIME1588CLK_DIV2 BIT(30)
+
+#define ICE_CGU_R23 0x5C
+#define ICE_CGU_R24 0x60
+#define ICE_CGU_R24_FBDIV_FRAC GENMASK(21, 0)
+#define ICE_CGU_R23_R24_TSPLL_ENABLE BIT(24)
+#define ICE_CGU_R23_R24_REF1588_CK_DIV GENMASK(30, 27)
+#define ICE_CGU_R23_R24_TIME_REF_SEL BIT(31)
+
+#define ICE_CGU_BW_TDC 0x31C
+#define ICE_CGU_BW_TDC_PLLLOCK_SEL GENMASK(30, 29)
+
+#define ICE_CGU_RO_LOCK 0x3F0
+#define ICE_CGU_RO_LOCK_TRUE_LOCK BIT(12)
+#define ICE_CGU_RO_LOCK_UNLOCK BIT(13)
+
+#define ICE_CGU_CNTR_BIST 0x344
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 BIT(15)
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1 BIT(16)
+
+#define ICE_CGU_RO_BWM_LF 0x370
+#define ICE_CGU_RO_BWM_LF_TRUE_LOCK BIT(12)
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
@@ -68,7 +109,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
int ice_get_caps(struct ice_hw *hw);
@@ -77,6 +118,12 @@ void ice_set_safe_mode_caps(struct ice_hw *hw);
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -91,14 +138,14 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
@@ -229,6 +276,9 @@ void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -303,4 +353,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val);
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val);
#endif /* _ICE_COMMON_H_ */
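
Illustrative sketch, not part of the patch: the new ice_read_cgu_reg()/ice_write_cgu_reg() helpers are meant to be combined with the register and field macros declared above. The function name and the particular field choices below are assumptions for illustration only, not the driver's actual TSPLL programming sequence.

/* Hypothetical read-modify-write of CGU register R9 via the new helpers. */
static int ice_example_enable_time_ref(struct ice_hw *hw, u8 freq_sel)
{
	u32 val;
	int err;

	err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
	if (err)
		return err;

	/* select the TIME_REF frequency and enable TIME_REF */
	val &= ~ICE_CGU_R9_TIME_REF_FREQ_SEL;
	val |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, freq_sel);
	val |= ICE_CGU_R9_TIME_REF_EN;

	return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
}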
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e3959ad442a2..dcb837cadd18 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -90,7 +90,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_sq_entries * sizeof(struct libie_aq_desc);
cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->sq.desc_buf.pa,
@@ -110,7 +110,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_rq_entries * sizeof(struct libie_aq_desc);
cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->rq.desc_buf.pa,
@@ -159,7 +159,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* allocate the mapped buffers */
for (i = 0; i < cq->num_rq_entries; i++) {
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
bi = &cq->rq.r.rq_bi[i];
@@ -173,9 +173,9 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* now configure the descriptors for use */
desc = ICE_CTL_Q_DESC(cq->rq, i);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with control queue design, there is no
* register for buffer size configuration
@@ -858,7 +858,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
@@ -912,7 +912,7 @@ static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void *desc, void *buf, u16 buf_len, bool response)
{
- struct ice_aq_desc *cq_desc = desc;
+ struct libie_aq_desc *cq_desc = desc;
u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
@@ -939,7 +939,8 @@ static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* by the DD and/or CMP flag set or a command with the RD flag set.
*/
if (buf && cq_desc->datalen &&
- (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
+ (flags & (LIBIE_AQ_FLAG_DD | LIBIE_AQ_FLAG_CMP |
+ LIBIE_AQ_FLAG_RD))) {
char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
@@ -992,11 +993,11 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_dma_mem *dma_buf = NULL;
- struct ice_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
int status = 0;
u16 retval = 0;
@@ -1007,7 +1008,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
return -EBUSY;
mutex_lock(&cq->sq_lock);
- cq->sq_last_status = ICE_AQ_RC_OK;
+ cq->sq_last_status = LIBIE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
@@ -1028,9 +1029,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
}
val = rd32(hw, cq->sq.head);
@@ -1112,9 +1113,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval &= 0xff;
}
cmd_completed = true;
- if (!status && retval != ICE_AQ_RC_OK)
+ if (!status && retval != LIBIE_AQ_RC_OK)
status = -EIO;
- cq->sq_last_status = (enum ice_aq_err)retval;
+ cq->sq_last_status = (enum libie_aq_err)retval;
}
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
@@ -1149,12 +1150,12 @@ sq_send_command_error:
*
* Fill the desc with default values
*/
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset(desc, 0, sizeof(*desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1172,9 +1173,9 @@ int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
+ enum libie_aq_err rq_last_status;
u16 ntc = cq->rq.next_to_clean;
- enum ice_aq_err rq_last_status;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1207,9 +1208,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
- rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ rq_last_status = (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & ICE_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
@@ -1230,9 +1231,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
bi = &cq->rq.r.rq_bi[ntc];
memset(desc, 0, sizeof(*desc));
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16(bi->size);
desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
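
Editorial summary, not part of the patch: the mechanical conversion applied throughout this series follows one pattern. The sketch below mirrors the LLDP set-local-MIB command converted in ice_common.c above; the function name is hypothetical and error handling is elided.

/* Sketch of the post-conversion admin queue command pattern. */
static int ice_example_aq_cmd(struct ice_hw *hw, u8 mib_type,
			      void *buf, u16 buf_size)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct libie_aq_desc desc;			/* was struct ice_aq_desc */

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
	cmd = libie_aq_raw(&desc);			/* was &desc.params.lldp_set_mib */
	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);	/* was ICE_AQ_FLAG_RD */
	desc.datalen = cpu_to_le16(buf_size);
	cmd->type = mib_type;

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
}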
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ca97b7365a1b..788040dd662e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -12,7 +12,7 @@
#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
- (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
@@ -76,12 +76,12 @@ struct ice_ctl_q_ring {
/* sq transaction details */
struct ice_sq_cd {
- struct ice_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
/* rq event information */
struct ice_rq_event_info {
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -96,7 +96,7 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
- enum ice_aq_err sq_last_status; /* last status on send queue */
+ enum libie_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 64737fc62306..abea84f14658 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -24,10 +24,10 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.lldp_get_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
@@ -64,9 +64,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_mib_change *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_event;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
@@ -95,9 +95,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_stop;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
@@ -121,9 +121,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_start;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
@@ -677,11 +677,11 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 opcode;
int status;
- cmd = &desc.params.lldp_agent_ctrl;
+ cmd = libie_aq_raw(&desc);
opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
@@ -714,7 +714,7 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
@@ -733,13 +733,13 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
- cmd = &desc.params.set_query_pfc_mode;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
@@ -914,7 +914,7 @@ static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -941,7 +941,7 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
/* CEE mode */
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
- } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ } else if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
@@ -965,7 +965,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_aqc_lldp_get_mib *mib;
u8 change_type, dcbx_mode;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
@@ -1537,12 +1537,12 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!pi)
return -EINVAL;
- cmd = &desc.params.port_ets;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 533eb8930aa8..9fc8681cc58e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -1020,7 +1020,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
pi = pf->hw.port_info;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
/* Ignore if event is not for Nearest Bridge */
mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
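
Illustrative sketch, not part of the patch: received event payloads are now accessed the same way, with libie_aq_raw() on the event descriptor instead of a cast of &event->desc.params.raw, as in the DCB MIB-change handlers above. The handler name below is hypothetical.

/* Hypothetical ARQ event handler using libie_aq_raw() on a received desc. */
static void ice_example_handle_mib_change(struct ice_rq_event_info *event)
{
	struct ice_aqc_lldp_get_mib *mib;
	u8 mib_type;

	mib = libie_aq_raw(&event->desc);
	mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);

	if (mib_type == ICE_AQ_LLDP_MIB_REMOTE)
		return;	/* remote MIBs handled elsewhere in this sketch */
}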
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 59323c019544..e2a036ce76ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1101,16 +1101,16 @@ struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
-static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum libie_aq_err aq_err)
{
switch (aq_err) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
+ case LIBIE_AQ_RC_ENOSEC:
+ case LIBIE_AQ_RC_EBADSIG:
return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
- case ICE_AQ_RC_ESVN:
+ case LIBIE_AQ_RC_ESVN:
return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
+ case LIBIE_AQ_RC_EBADMAN:
+ case LIBIE_AQ_RC_EBADBUF:
return ICE_DDP_PKG_LOAD_ERROR;
default:
return ICE_DDP_PKG_ERR;
@@ -1180,7 +1180,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1188,9 +1188,9 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1259,7 +1259,7 @@ static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
struct ice_buf_hdr *prev_hunk = ctx->hdr;
struct ice_hw *hw = ctx->hw;
bool prev_was_last = !hunk;
- enum ice_aq_err aq_err;
+ enum libie_aq_err aq_err;
u32 offset, info;
int attempt, err;
@@ -1278,7 +1278,8 @@ static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
prev_was_last, &offset, &info, NULL);
aq_err = hw->adminq.sq_last_status;
- if (aq_err != ICE_AQ_RC_ENOSEC && aq_err != ICE_AQ_RC_EBADSIG)
+ if (aq_err != LIBIE_AQ_RC_ENOSEC &&
+ aq_err != LIBIE_AQ_RC_EBADSIG)
break;
}
@@ -1537,7 +1538,7 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
static enum ice_ddp_state
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum libie_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
struct ice_ddp_send_ctx ctx = { .hw = hw };
int status;
@@ -1687,7 +1688,7 @@ static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
@@ -1711,7 +1712,7 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1719,9 +1720,9 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1753,10 +1754,10 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}
@@ -2301,6 +2302,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ if (!buf_copy)
+ return ICE_DDP_PKG_ERR;
state = ice_init_pkg(hw, buf_copy, len);
if (!ice_is_init_pkg_successful(state)) {
@@ -2333,10 +2336,10 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
struct ice_sq_cd *cd, u8 *flags, bool set)
{
struct ice_aqc_get_set_tx_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_set_tx_topo;
+ cmd = libie_aq_raw(&desc);
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
@@ -2345,14 +2348,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
if (hw->mac_type == ICE_MAC_E810 ||
hw->mac_type == ICE_MAC_GENERIC)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
}
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -2360,7 +2363,7 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
return status;
/* read the return flag values (first byte) for get operation */
if (!set && flags)
- *flags = desc.params.get_set_tx_topo.set_flags;
+ *flags = cmd->set_flags;
return 0;
}
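
Illustrative sketch, not part of the patch: with sq_last_status now carrying enum libie_aq_err, a failed package command can be translated into a DDP state through the remapped helper above. The wrapper name is hypothetical; ICE_DDP_PKG_SUCCESS is the zero value of enum ice_ddp_state.

/* Hypothetical wrapper around the remapped ice_map_aq_err_to_ddp_state(). */
static enum ice_ddp_state
ice_example_ddp_state_from_aq(struct ice_hw *hw, int status)
{
	enum libie_aq_err aq_err = hw->adminq.sq_last_status;

	if (!status)
		return ICE_DDP_PKG_SUCCESS;

	return ice_map_aq_err_to_ddp_state(aq_err);
}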
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index 9fc0fd95a13d..cb71eca6a85b 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -606,7 +606,7 @@ void ice_debugfs_fwlog_init(struct ice_pf *pf)
pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf))
+ if (IS_ERR(pf->ice_debugfs_pf_fwlog))
goto err_create_module_files;
fw_modules_dir = debugfs_create_dir("modules",
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 34fd604132f5..bd4e66df0372 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,6 +6,24 @@
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index bce3ad6ca2a6..53b54e395a2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -11,6 +11,43 @@
#define ICE_DPLL_RCLK_NUM_PER_PF 1
#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125
+#define ICE_DPLL_PIN_PRIO_OUTPUT 0xff
+#define ICE_DPLL_INPUT_REF_NUM 10
+#define ICE_DPLL_PHASE_OFFSET_PERIOD 2
+#define ICE_DPLL_SW_PIN_INPUT_BASE_SFP 4
+#define ICE_DPLL_SW_PIN_INPUT_BASE_QSFP 6
+#define ICE_DPLL_SW_PIN_OUTPUT_BASE 0
+
+#define ICE_DPLL_PIN_SW_INPUT_ABS(in_idx) \
+ (ICE_DPLL_SW_PIN_INPUT_BASE_SFP + (in_idx))
+
+#define ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_DPLL_PIN_SW_OUTPUT_ABS(out_idx) \
+ (ICE_DPLL_SW_PIN_OUTPUT_BASE + (out_idx))
+
+#define ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_SR_PFA_DPLL_DEFAULTS 0x152
+#define ICE_DPLL_PFA_REF_SYNC_TYPE 0x2420
+#define ICE_DPLL_PFA_REF_SYNC_TYPE2 0x2424
+#define ICE_DPLL_PFA_END 0xFFFF
+#define ICE_DPLL_PFA_HEADER_LEN 4
+#define ICE_DPLL_PFA_ENTRY_LEN 3
+#define ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S 4
+#define ICE_DPLL_PFA_MASK_OFFSET 1
+#define ICE_DPLL_PFA_VALUE_OFFSET 2
+
+#define ICE_DPLL_E810C_SFP_NC_PINS 2
+#define ICE_DPLL_E810C_SFP_NC_START 4
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -18,25 +55,61 @@
* @ICE_DPLL_PIN_TYPE_INPUT: input pin
* @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
* @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins
*/
enum ice_dpll_pin_type {
ICE_DPLL_PIN_INVALID,
ICE_DPLL_PIN_TYPE_INPUT,
ICE_DPLL_PIN_TYPE_OUTPUT,
ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
};
static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_INPUT] = "input",
[ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+ [ICE_DPLL_PIN_TYPE_SOFTWARE] = "software",
};
+static const char * const ice_dpll_sw_pin_sma[] = { "SMA1", "SMA2" };
+static const char * const ice_dpll_sw_pin_ufl[] = { "U.FL1", "U.FL2" };
+
static const struct dpll_pin_frequency ice_esync_range[] = {
DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
};
/**
+ * ice_dpll_is_sw_pin - check if given pin shall be controlled by SW
+ * @pf: private board structure
+ * @index: index of a pin as understood by FW
+ * @input: true for input, false for output
+ *
+ * Check if the pin shall be controlled by SW instead of providing raw access
+ * for pin control. On an E810 NIC with a dpll there is extra MUX-related logic
+ * between the SMA/U.FL pins/connectors and the dpll device, so it is best to
+ * give the user access through wrapper functions: from the user's perspective
+ * they convey a single functionality rather than separate pins.
+ *
+ * Return:
+ * * true - pin controlled by SW
+ * * false - pin not controlled by SW
+ */
+static bool ice_dpll_is_sw_pin(struct ice_pf *pf, u8 index, bool input)
+{
+ if (input && pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ index -= ICE_DPLL_SW_PIN_INPUT_BASE_QSFP -
+ ICE_DPLL_SW_PIN_INPUT_BASE_SFP;
+
+ if ((input && (index == ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX)) ||
+ (!input && (index == ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX)))
+ return true;
+ return false;
+}
+
+/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
* @extack: error reporting
@@ -97,7 +170,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to set pin freq:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
return ret;
}
@@ -280,6 +353,87 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_pin_frequency_set - callback to set frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set on the pin
+ * @extack: error reporting
+ *
+ * Calls the set frequency command for the corresponding active input/output pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error: pin not active or failed to set in hw
+ */
+static int
+ice_dpll_sw_pin_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT)
+ ret = ice_dpll_input_frequency_set(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ else
+ ret = ice_dpll_output_frequency_set(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_frequency_get - callback for get frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Calls the get frequency command for the corresponding active input/output pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error: pin not active or couldn't read from hw
+ */
+static int
+ice_dpll_sw_pin_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ *frequency = 0;
+ return 0;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_input_frequency_get(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ } else {
+ ret = ice_dpll_output_frequency_get(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
@@ -323,7 +477,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
if (ret)
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to enable %s pin:%u",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
@@ -368,13 +522,74 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
if (ret)
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to disable %s pin:%u",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
/**
+ * ice_dpll_sw_pins_update - update status of all SW pins
+ * @pf: private board struct
+ *
+ * Determine and update pin struct fields (direction/active) of their current
+ * values for all the SW controlled pins.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pins_update(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *p;
+ u8 data = 0;
+ int ret;
+
+ ret = ice_read_sma_ctrl(&pf->hw, &data);
+ if (ret)
+ return ret;
+ /* no change since last check */
+ if (d->sma_data == data)
+ return 0;
+
+ /*
+ * SMA1/U.FL1 and SMA2/U.FL2 use different bit schemes to decide
+ * their direction and whether they are active
+ */
+ p = &d->sma[ICE_DPLL_PIN_SW_1_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if (data & ICE_SMA1_DIR_EN) {
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ if (data & ICE_SMA1_TX_EN)
+ p->active = false;
+ }
+
+ p = &d->sma[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if ((data & ICE_SMA2_INACTIVE_MASK) == ICE_SMA2_INACTIVE_MASK)
+ p->active = false;
+ else if (data & ICE_SMA2_DIR_EN)
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_1_IDX];
+ if (!(data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)))
+ p->active = true;
+ else
+ p->active = false;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = (data & ICE_SMA2_DIR_EN) && !(data & ICE_SMA2_UFL2_RX_DIS);
+ d->sma_data = data;
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_update - update pin's state
* @pf: private board struct
* @pin: structure with pin attributes to be updated
@@ -471,6 +686,11 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
DPLL_PIN_STATE_DISCONNECTED;
}
break;
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ ret = ice_dpll_sw_pins_update(pf);
+ if (ret)
+ goto err;
+ break;
default:
return -EINVAL;
}
@@ -481,13 +701,13 @@ err:
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to update %s pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
else
dev_err_ratelimited(ice_pf_to_dev(pf),
"err:%d %s failed to update %s pin:%u\n",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
@@ -520,7 +740,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to set pin prio:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
else
dpll->input_prio[pin->idx] = prio;
@@ -588,6 +808,67 @@ static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
+ * ice_dpll_phase_offset_monitor_set - set phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: feature state to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Enable/disable the phase offset monitor feature of the dpll.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (state == DPLL_FEATURE_STATE_ENABLE)
+ d->phase_offset_monitor_period = ICE_DPLL_PHASE_OFFSET_PERIOD;
+ else
+ d->phase_offset_monitor_period = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_phase_offset_monitor_get - get phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds current state of phase offset monitor
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides the current state of the phase offset
+ * monitor feature on the dpll device.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->phase_offset_monitor_period)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_set - set pin's state on dpll
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -793,6 +1074,270 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sma_direction_set - set direction of SMA pin
+ * @p: pointer to a pin
+ * @direction: requested direction of the pin
+ * @extack: error reporting
+ *
+ * Wrapper for dpll subsystem callback. Set direction of a SMA pin.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ u8 data;
+ int ret;
+
+ if (p->direction == direction && p->active)
+ return 0;
+ ret = ice_read_sma_ctrl(&p->pf->hw, &data);
+ if (ret)
+ return ret;
+
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ data &= ~ICE_SMA1_MASK;
+ if (direction == DPLL_PIN_DIRECTION_OUTPUT)
+ data |= ICE_SMA1_DIR_EN;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (direction == DPLL_PIN_DIRECTION_INPUT) {
+ data &= ~ICE_SMA2_DIR_EN;
+ } else {
+ data &= ~ICE_SMA2_TX_EN;
+ data |= ICE_SMA2_DIR_EN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = ice_write_sma_ctrl(&p->pf->hw, data);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(p->pf, p,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_ufl_pin_state_set - set U.FL pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set the state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ enum ice_dpll_pin_type type;
+ struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
+ bool enable;
+ u8 data;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ hw = &pf->hw;
+ ret = ice_read_sma_ctrl(hw, &data);
+ if (ret)
+ goto unlock;
+
+ ret = -EINVAL;
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ data &= ~ICE_SMA1_MASK;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA1_TX_EN;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ data |= ICE_SMA2_DIR_EN;
+ data &= ~ICE_SMA2_UFL2_RX_DIS;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA2_UFL2_RX_DIS;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ret = ice_write_sma_ctrl(hw, data);
+ if (ret)
+ goto unlock;
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ ret = ice_dpll_pin_enable(hw, target, d->dpll_idx, type, extack);
+ else
+ ret = ice_dpll_pin_disable(hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_state_get - get SW pin state
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = 0;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!p->active) {
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ goto unlock;
+ }
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_pin_state_update(pf, p->input,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->input->state[d->dpll_idx];
+ } else {
+ ret = ice_dpll_pin_state_update(pf, p->output,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->output->state[d->dpll_idx];
+ }
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sma_pin_state_set - set SMA pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = sma->pf;
+ enum ice_dpll_pin_type type;
+ bool enable;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ if (!sma->active) {
+ ret = ice_dpll_sma_direction_set(sma, sma->direction, extack);
+ if (ret)
+ goto unlock;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ enable = state == DPLL_PIN_STATE_SELECTABLE;
+ target = sma->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ } else {
+ enable = state == DPLL_PIN_STATE_CONNECTED;
+ target = sma->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, target, d->dpll_idx, type,
+ extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
* ice_dpll_input_prio_get - get dpll's input prio
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -860,6 +1405,47 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
return ret;
}
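+
+/**
+ * ice_dpll_sw_input_prio_get - get priority of a SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: on success holds priority of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Get priority of the input pin backing a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */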
+static int
+ice_dpll_sw_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (p->input && p->direction == DPLL_PIN_DIRECTION_INPUT)
+ *prio = d->input_prio[p->input->idx];
+ else
+ *prio = ICE_DPLL_PIN_PRIO_OUTPUT;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
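+/**
+ * ice_dpll_sw_input_prio_set - set priority of a SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: requested priority of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set priority of the input pin backing a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */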
+static int
+ice_dpll_sw_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (!p->input || p->direction != DPLL_PIN_DIRECTION_INPUT)
+ return -EINVAL;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p->input, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
/**
* ice_dpll_input_direction - callback for get input pin direction
* @pin: pointer to a pin
@@ -911,6 +1497,76 @@ ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_pin_sma_direction_set - callback for set SMA pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: requested pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting direction of an SMA pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_sma_direction_set(p, direction, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_sw_direction_get - callback for get SW pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: on success holds pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sw_direction_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ *direction = p->direction;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1006,7 +1662,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
return ret;
@@ -1024,7 +1680,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
* Dpll subsystem callback. Wraps a handler for setting phase adjust on input
* pin.
*
- * Context: Calls a function which acquires pf->dplls.lock
+ * Context: Calls a function which acquires and releases pf->dplls.lock
* Return:
* * 0 - success
* * negative - error
@@ -1068,6 +1724,82 @@ ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
ICE_DPLL_PIN_TYPE_OUTPUT);
}
+/**
+ * ice_dpll_sw_phase_adjust_get - callback for get SW pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds phase adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for getting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_get(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+ else
+ return ice_dpll_pin_phase_adjust_get(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+}
+
+/**
+ * ice_dpll_sw_phase_adjust_set - callback for set SW pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_set(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+ else
+ return ice_dpll_pin_phase_adjust_set(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
#define ICE_DPLL_PHASE_OFFSET_FACTOR \
(DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
@@ -1093,12 +1825,16 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *phase_offset, struct netlink_ext_ack *extack)
{
+ struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
struct ice_pf *pf = d->pf;
mutex_lock(&pf->dplls.lock);
- if (d->active_input == pin)
+ if (d->active_input == pin || (p->input &&
+ d->active_input == p->input->pin))
*phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else if (d->phase_offset_monitor_period)
+ *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
else
*phase_offset = 0;
mutex_unlock(&pf->dplls.lock);
@@ -1315,6 +2051,219 @@ ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_esync_set - callback for setting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_set(p->input->pin, p->input, dpll,
+ dpll_priv, freq, extack);
+ else
+ return ice_dpll_output_esync_set(p->output->pin, p->output,
+ dpll, dpll_priv, freq, extack);
+}
+
+/**
+ * ice_dpll_sw_esync_get - callback for getting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync frequency and properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * of SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_get(p->input->pin, p->input, dpll,
+ dpll_priv, esync, extack);
+ else
+ return ice_dpll_output_esync_get(p->output->pin, p->output,
+ dpll, dpll_priv, esync,
+ extack);
+}
+
+/**
+ * ice_dpll_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting the reference sync feature
+ * for an input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+
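+	/* Preserve the input-enable flag of the pin and request reference
+	 * sync only when the pair is to be connected.
+	 */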
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (state == DPLL_PIN_STATE_CONNECTED)
+ flags_en |= ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, flags_en, 0, 0);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting the reference sync state of
+ * an input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN)
+ *state = DPLL_PIN_STATE_CONNECTED;
+ else
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting the reference sync feature
+ * for a SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_set(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting the reference sync state of
+ * a SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_get(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1364,7 +2313,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -1427,6 +2376,37 @@ static const struct dpll_pin_ops ice_dpll_rclk_ops = {
.direction_get = ice_dpll_input_direction,
};
+static const struct dpll_pin_ops ice_dpll_pin_sma_ops = {
+ .state_on_dpll_set = ice_dpll_sma_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .direction_set = ice_dpll_pin_sma_direction_set,
+ .prio_get = ice_dpll_sw_input_prio_get,
+ .prio_set = ice_dpll_sw_input_prio_set,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .ref_sync_set = ice_dpll_sw_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_sw_input_ref_sync_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_pin_ufl_ops = {
+ .state_on_dpll_set = ice_dpll_ufl_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
static const struct dpll_pin_ops ice_dpll_input_ops = {
.frequency_get = ice_dpll_input_frequency_get,
.frequency_set = ice_dpll_input_frequency_set,
@@ -1440,6 +2420,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_offset_get = ice_dpll_phase_offset_get,
.esync_set = ice_dpll_input_esync_set,
.esync_get = ice_dpll_input_esync_get,
+ .ref_sync_set = ice_dpll_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_input_ref_sync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1459,6 +2441,13 @@ static const struct dpll_device_ops ice_dpll_ops = {
.mode_get = ice_dpll_mode_get,
};
+static const struct dpll_device_ops ice_dpll_pom_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_get = ice_dpll_mode_get,
+ .phase_offset_monitor_set = ice_dpll_phase_offset_monitor_set,
+ .phase_offset_monitor_get = ice_dpll_phase_offset_monitor_get,
+};
+
/**
* ice_generate_clock_id - generates unique clock_id for registering dpll.
* @pf: board private structure
@@ -1504,6 +2493,110 @@ static void ice_dpll_notify_changes(struct ice_dpll *d)
}
/**
+ * ice_dpll_is_pps_phase_monitor - check if dpll is capable of phase offset monitoring
+ * @pf: pf private structure
+ *
+ * Check if the firmware supports the admin command that provides phase offset
+ * monitoring on all input pins of the PPS dpll.
+ *
+ * Return:
+ * * true - PPS dpll phase offset monitoring is supported
+ * * false - PPS dpll phase offset monitoring is not supported
+ */
+static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ int ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+
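+	/* Treat an ESRCH admin queue status as lack of support for the
+	 * command; any other result means the feature is available.
+	 */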
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_ESRCH)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes
+ * @pins: array of ice_dpll_pin pointers registered within dpll subsystem
+ * @pin_num: number of pins
+ * @phase_offset_ntf_mask: bitmask of pin indexes to notify
+ *
+ * Iterate over the array of pins and notify the dpll subsystem about each
+ * pin whose index is set in the bitmask.
+ *
+ * Context: Must be called without holding pf->dplls.lock.
+ */
+static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins,
+ u8 pin_num,
+ u32 phase_offset_ntf_mask)
+{
+ int i = 0;
+
+ for (i = 0; i < pin_num; i++)
+ if (phase_offset_ntf_mask & (1 << i))
+ dpll_pin_change_ntf(pins[i].pin);
+}
+
+/**
+ * ice_dpll_pps_update_phase_offsets - update phase offset measurements
+ * @pf: pf private structure
+ * @phase_offset_pins_updated: returns mask of updated input pin indexes
+ *
+ * Read phase offset measurements for the PPS dpll device and store the values
+ * in the input pins array. On success, phase_offset_pins_updated holds a
+ * bitmask of updated input pin indexes; those pins shall be notified.
+ *
+ * Context: Shall be called with pf->dplls.lock held.
+ * Return:
+ * * 0 - success or no data available
+ * * negative - AQ failure
+ */
+static int ice_dpll_pps_update_phase_offsets(struct ice_pf *pf,
+ u32 *phase_offset_pins_updated)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ struct ice_dpll_pin *p;
+ s64 phase_offset, tmp;
+ int i, j, ret;
+
+ *phase_offset_pins_updated = 0;
+ ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_EAGAIN) {
+ return 0;
+ } else if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed to get input pin measurements dpll=%d, ret=%d %s\n",
+ DPLL_TYPE_PPS, ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ for (i = 0; i < pf->dplls.num_inputs; i++) {
+ p = &pf->dplls.inputs[i];
+ phase_offset = 0;
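+		/* Assemble the raw phase offset bytes into a single value;
+		 * the measurement is a 48-bit two's complement number, hence
+		 * the sign extension from bit 47 below.
+		 */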
+ for (j = 0; j < ICE_CGU_INPUT_PHASE_OFFSET_BYTES; j++) {
+ tmp = meas[i].phase_offset[j];
+#ifdef __LITTLE_ENDIAN
+ phase_offset += tmp << 8 * j;
+#else
+ phase_offset += tmp << 8 *
+ (ICE_CGU_INPUT_PHASE_OFFSET_BYTES - 1 - j);
+#endif
+ }
+ phase_offset = sign_extend64(phase_offset, 47);
+ if (p->phase_offset != phase_offset) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "phase offset changed for pin:%d old:%llx, new:%llx\n",
+ p->idx, p->phase_offset, phase_offset);
+ p->phase_offset = phase_offset;
+ *phase_offset_pins_updated |= (1 << i);
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_update_state - update dpll state
* @pf: pf private structure
* @d: pointer to queried dpll device
@@ -1534,7 +2627,7 @@ ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
dev_err(ice_pf_to_dev(pf),
"update dpll=%d state failed, ret=%d %s\n",
d->dpll_idx, ret,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
return ret;
}
if (init) {
@@ -1589,14 +2682,19 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
+ u32 phase_offset_ntf = 0;
int ret = 0;
if (ice_is_reset_in_progress(pf->state))
goto resched;
mutex_lock(&pf->dplls.lock);
+ d->periodic_counter++;
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
ret = ice_dpll_update_state(pf, dp, false);
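+	/* Phase offset measurements are refreshed once every
+	 * phase_offset_monitor_period runs of this worker.
+	 */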
+ if (!ret && dp->phase_offset_monitor_period &&
+ d->periodic_counter % dp->phase_offset_monitor_period == 0)
+ ret = ice_dpll_pps_update_phase_offsets(pf, &phase_offset_ntf);
if (ret) {
d->cgu_state_acq_err_num++;
/* stop rescheduling this worker */
@@ -1611,6 +2709,9 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
mutex_unlock(&pf->dplls.lock);
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+ if (phase_offset_ntf)
+ ice_dpll_pins_notify_mask(d->inputs, d->num_inputs,
+ phase_offset_ntf);
resched:
/* Run twice a second or reschedule if update failed */
@@ -1620,6 +2721,88 @@ resched:
}
/**
+ * ice_dpll_init_ref_sync_inputs - initialize reference sync pin pairs
+ * @pf: pf private structure
+ *
+ * Read the DPLL defaults TLV and initialize reference sync pin pairs in the
+ * dpll subsystem.
+ *
+ * Return:
+ * * 0 - success or nothing to do (no ref-sync TLVs are present)
+ * * negative - AQ failure
+ */
+static int ice_dpll_init_ref_sync_inputs(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ struct ice_hw *hw = &pf->hw;
+ u16 addr, len, end, hdr;
+ int ret;
+
+ ret = ice_get_pfa_module_tlv(hw, &hdr, &len, ICE_SR_PFA_DPLL_DEFAULTS);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to read PFA dpll defaults TLV ret=%d\n", ret);
+ return ret;
+ }
+ end = hdr + len;
+
+ for (addr = hdr + ICE_DPLL_PFA_HEADER_LEN; addr < end;
+ addr += ICE_DPLL_PFA_ENTRY_LEN) {
+ unsigned long bit, ul_mask, offset;
+ u16 pin, mask, buf;
+ bool valid = false;
+
+ ret = ice_read_sr_word(hw, addr, &buf);
+ if (ret)
+ return ret;
+
+ switch (buf) {
+ case ICE_DPLL_PFA_REF_SYNC_TYPE:
+ case ICE_DPLL_PFA_REF_SYNC_TYPE2:
+ {
+ u16 mask_addr = addr + ICE_DPLL_PFA_MASK_OFFSET;
+ u16 val_addr = addr + ICE_DPLL_PFA_VALUE_OFFSET;
+
+ ret = ice_read_sr_word(hw, mask_addr, &mask);
+ if (ret)
+ return ret;
+ ret = ice_read_sr_word(hw, val_addr, &pin);
+ if (ret)
+ return ret;
+ if (buf == ICE_DPLL_PFA_REF_SYNC_TYPE)
+ pin >>= ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S;
+ valid = true;
+ break;
+ }
+ case ICE_DPLL_PFA_END:
+ addr = end;
+ break;
+ default:
+ continue;
+ }
+ if (!valid)
+ continue;
+
+ ul_mask = mask;
+ offset = 0;
+ for_each_set_bit(bit, &ul_mask, BITS_PER_TYPE(u16)) {
+ int i, j;
+
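+			/* On E810-C SFP, pin indexes above
+			 * ICE_DPLL_E810C_SFP_NC_START are shifted down so
+			 * that both the pin and its reference sync partner
+			 * index the driver's input pin array.
+			 */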
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP &&
+ pin > ICE_DPLL_E810C_SFP_NC_START)
+ offset = -ICE_DPLL_E810C_SFP_NC_PINS;
+ i = pin + offset;
+ j = bit + offset;
+ if (i < 0 || j < 0)
+ return -ERANGE;
+ inputs[i].ref_sync = j;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_release_pins - release pins resources from dpll subsystem
* @pins: pointer to pins array
* @count: number of pins
@@ -1689,7 +2872,38 @@ ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int i;
for (i = 0; i < count; i++)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_pin_ref_sync_register - register reference sync pins
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Register reference sync pins in dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_pin_ref_sync_register(struct ice_dpll_pin *pins, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ if (!pins[i].hidden && pins[i].ref_sync) {
+ int j = pins[i].ref_sync;
+
+ ret = dpll_pin_ref_sync_pair_add(pins[i].pin,
+ pins[j].pin);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -1712,16 +2926,19 @@ ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int ret, i;
for (i = 0; i < count; i++) {
- ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
- if (ret)
- goto unregister_pins;
+ if (!pins[i].hidden) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
}
return 0;
unregister_pins:
while (--i >= 0)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
return ret;
}
@@ -1909,6 +3126,18 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
ice_dpll_unregister_pins(de->dpll, outputs,
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
+ if (!pf->dplls.generic) {
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ }
}
}
@@ -1926,8 +3155,7 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
- u32 rclk_idx;
- int ret;
+ int ret, count;
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
pf->dplls.num_inputs,
@@ -1935,23 +3163,64 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
pf->dplls.eec.dpll, pf->dplls.pps.dpll);
if (ret)
return ret;
+ count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- pf->dplls.num_inputs,
+ count,
pf->dplls.num_outputs,
&ice_dpll_output_ops,
pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
+ count += pf->dplls.num_outputs;
+ if (!pf->dplls.generic) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.sma,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_outputs;
+ count += ICE_DPLL_PIN_SW_NUM;
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.ufl,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_sma;
+ count += ICE_DPLL_PIN_SW_NUM;
+ }
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.inputs,
+ pf->dplls.num_inputs);
+ if (ret)
+ goto deinit_ufl;
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM);
+ if (ret)
+ goto deinit_ufl;
+ } else {
+ count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
- rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
&ice_dpll_rclk_ops);
if (ret)
- goto deinit_outputs;
+ goto deinit_ufl;
return 0;
+deinit_ufl:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+deinit_sma:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
deinit_outputs:
ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
@@ -1977,7 +3246,7 @@ static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
- dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_unregister(d->dpll, d->ops, d);
dpll_device_put(d->dpll);
}
@@ -2011,12 +3280,17 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ const struct dpll_device_ops *ops = &ice_dpll_ops;
+
+ if (type == DPLL_TYPE_PPS && ice_dpll_is_pps_phase_monitor(pf))
+ ops = &ice_dpll_pom_ops;
ice_dpll_update_state(pf, d, true);
- ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
dpll_device_put(d->dpll);
return ret;
}
+ d->ops = ops;
}
return 0;
@@ -2184,8 +3458,10 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
default:
return -EINVAL;
}
- if (num_pins != ice_cgu_get_num_pins(hw, input))
+ if (num_pins != ice_cgu_get_num_pins(hw, input)) {
+ pf->dplls.generic = true;
return ice_dpll_init_info_pins_generic(pf, input);
+ }
for (i = 0; i < num_pins; i++) {
caps = 0;
@@ -2203,10 +3479,14 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
+ if (ice_dpll_is_sw_pin(pf, i, true))
+ pins[i].hidden = true;
} else {
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
+ if (ice_dpll_is_sw_pin(pf, i, false))
+ pins[i].hidden = true;
}
ice_dpll_phase_range_set(&pins[i].prop.phase_range,
phase_adj_max);
@@ -2219,6 +3499,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pins[i].prop.freq_supported_num = freq_supp_num;
pins[i].pf = pf;
}
+ if (input)
+ ret = ice_dpll_init_ref_sync_inputs(pf);
return ret;
}
@@ -2246,6 +3528,91 @@ static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_sw_pins - initializes software controlled pin information
+ * @pf: board private structure
+ *
+ * Initialize information for software controlled pins and cache it in
+ * pf->dplls.sma and pf->dplls.ufl.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_sw_pins(struct ice_pf *pf)
+{
+ u8 freq_supp_num, pin_abs_idx, input_idx_offset = 0;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *pin;
+ u32 phase_adj_max, caps;
+ int i, ret;
+
+ if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ input_idx_offset = ICE_E810_RCLK_PINS_NUM;
+ phase_adj_max = max(d->input_phase_adj_max, d->output_phase_adj_max);
+ caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
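+	/* SMA pins are software controlled and bidirectional: each one maps
+	 * onto both a hardware input pin and a hardware output pin.
+	 */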
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->sma[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + input_idx_offset;
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_sma[i];
+ pin->input = &d->inputs[pin_abs_idx];
+ if (pin->input->ref_sync)
+ pin->ref_sync = pin->input->ref_sync - pin_abs_idx;
+ pin->output = &d->outputs[ICE_DPLL_PIN_SW_OUTPUT_ABS(i)];
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
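+	/* U.FL pins have a fixed direction: the first one is an output only,
+	 * the second one an input only.
+	 */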
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->ufl[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin->prop.capabilities = caps;
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_ufl[i];
+ if (i == ICE_DPLL_PIN_SW_1_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_OUTPUT_ABS(i);
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ false,
+ &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->input = NULL;
+ pin->output = &d->outputs[pin_abs_idx];
+ } else if (i == ICE_DPLL_PIN_SW_2_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_INPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) +
+ input_idx_offset;
+ pin->output = NULL;
+ pin->input = &d->inputs[pin_abs_idx];
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ }
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_pins_info - init pins info wrapper
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -2265,6 +3632,8 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
return ice_dpll_init_info_rclk_pin(pf);
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ return ice_dpll_init_info_sw_pins(pf);
default:
return -EINVAL;
}
@@ -2309,7 +3678,7 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
if (ret) {
dev_err(ice_pf_to_dev(pf),
"err:%d %s failed to read cgu abilities\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
return ret;
}
@@ -2351,6 +3720,9 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
if (ret)
goto deinit_info;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_SOFTWARE);
+ if (ret)
+ goto deinit_info;
}
ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index c320f1bf7d6d..c0da03384ce9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -8,6 +8,18 @@
#define ICE_DPLL_RCLK_NUM_MAX 4
+/**
+ * enum ice_dpll_pin_sw - enumerate ice software pin indices
+ * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin
+ * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin
+ * @ICE_DPLL_PIN_SW_NUM: number of SW pins in pair
+ */
+enum ice_dpll_pin_sw {
+ ICE_DPLL_PIN_SW_1_IDX,
+ ICE_DPLL_PIN_SW_2_IDX,
+ ICE_DPLL_PIN_SW_NUM
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
@@ -19,6 +31,8 @@
* @prop: pin properties
* @freq: current frequency of a pin
* @phase_adjust: current phase adjust value
+ * @input: pointer to the underlying input pin (SW pins only)
+ * @output: pointer to the underlying output pin (SW pins only)
+ * @direction: current direction of a SW pin
+ * @phase_offset: monitored phase offset value
+ * @ref_sync: id of the reference sync pin
+ * @active: true if the pin is active
+ * @hidden: true if the pin is not registered with the dpll subsystem
*/
struct ice_dpll_pin {
struct dpll_pin *pin;
@@ -31,7 +45,14 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ struct ice_dpll_pin *input;
+ struct ice_dpll_pin *output;
+ enum dpll_pin_direction direction;
+ s64 phase_offset;
u8 status;
+ u8 ref_sync;
+ bool active;
+ bool hidden;
};
/** ice_dpll - store info required for DPLL control
@@ -47,8 +68,10 @@ struct ice_dpll_pin {
* @input_prio: priorities of each input
* @dpll_state: current dpll sync state
* @prev_dpll_state: last dpll sync state
+ * @phase_offset_monitor_period: period between phase offset monitor reads
* @active_input: pointer to active input pin
* @prev_input: pointer to previous active input pin
+ * @ops: holds the registered ops
*/
struct ice_dpll {
struct dpll_device *dpll;
@@ -64,8 +87,10 @@ struct ice_dpll {
enum dpll_lock_status dpll_state;
enum dpll_lock_status prev_dpll_state;
enum dpll_mode mode;
+ u32 phase_offset_monitor_period;
struct dpll_pin *active_input;
struct dpll_pin *prev_input;
+ const struct dpll_device_ops *ops;
};
/** ice_dplls - store info required for CCU (clock controlling unit)
@@ -84,6 +109,7 @@ struct ice_dpll {
* @clock_id: clock_id of dplls
* @input_phase_adj_max: max phase adjust value for an input pins
* @output_phase_adj_max: max phase adjust value for an output pins
+ * @sma: array of software controlled SMA pins
+ * @ufl: array of software controlled U.FL pins
+ * @sma_data: cached SMA control data
+ * @periodic_counter: counter of periodic work executions
+ * @generic: true if generic pins are used instead of board-specific ones
*/
struct ice_dplls {
struct kthread_worker *kworker;
@@ -93,14 +119,19 @@ struct ice_dplls {
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM];
+ struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM];
struct ice_dpll_pin rclk;
u8 num_inputs;
u8 num_outputs;
- int cgu_state_acq_err_num;
+ u8 sma_data;
u8 base_rclk_idx;
+ int cgu_state_acq_err_num;
u64 clock_id;
s32 input_phase_adj_max;
s32 output_phase_adj_max;
+ u32 periodic_counter;
+ bool generic;
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bbf9e6fd315b..55e0f2c6af9e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -667,7 +667,8 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
port_topology->serdes_lane_count = 4;
- else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
+ max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
port_topology->serdes_lane_count = 2;
else
port_topology->serdes_lane_count = 1;
@@ -836,6 +837,15 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static void ice_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static int ice_get_eeprom_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -869,7 +879,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
ret = ice_acquire_nvm(hw, ICE_RES_READ);
if (ret) {
dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -877,7 +887,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
false);
if (ret) {
dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -2788,14 +2798,7 @@ done:
return err;
}
-/**
- * ice_parse_hdrs - parses headers from RSS hash input
- * @nfc: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
{
u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
@@ -2860,15 +2863,7 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-/**
- * ice_parse_hash_flds - parses hash fields from RSS hash input
- * @nfc: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended
- * hash fields for RSS configuration
- */
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
+static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2965,16 +2960,13 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
return hfld;
}
-/**
- * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+ice_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_rss_hash_cfg cfg;
struct device *dev;
@@ -3020,14 +3012,11 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return 0;
}
-/**
- * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- */
-static void
-ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+static int
+ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
@@ -3040,21 +3029,21 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
@@ -3081,6 +3070,8 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_GTP_TEID;
+
+ return 0;
}
/**
@@ -3100,8 +3091,6 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ice_add_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXCLSRLDEL:
return ice_del_fdir_ethtool(vsi, cmd);
- case ETHTOOL_SRXFH:
- return ice_set_rss_hash_opt(vsi, cmd);
default:
break;
}
@@ -3144,10 +3133,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ice_get_rss_hash_opt(vsi, cmd);
- ret = 0;
- break;
default:
break;
}
@@ -3557,15 +3542,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3607,11 +3592,10 @@ static int
ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
- int err, num_tc, i;
+ int err, i;
u8 *lut;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3619,24 +3603,8 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
return -EOPNOTSUPP;
}
- if (rss_context && !ice_is_adq_active(pf)) {
- netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
- return -EINVAL;
- }
-
- qcount = vsi->mqprio_qopt.qopt.count[rss_context];
- offset = vsi->mqprio_qopt.qopt.offset[rss_context];
-
- if (rss_context && ice_is_adq_active(pf)) {
- num_tc = vsi->mqprio_qopt.qopt.num_tc;
- if (rss_context >= num_tc) {
- netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
- rss_context, num_tc);
- return -EINVAL;
- }
- /* Use channel VSI of given TC */
- vsi = vsi->tc_map_vsi[rss_context];
- }
+ qcount = vsi->mqprio_qopt.qopt.count[0];
+ offset = vsi->mqprio_qopt.qopt.offset[0];
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
@@ -3696,9 +3664,6 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return -EOPNOTSUPP;
-
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
/* RSS not supported return error here */
netdev_warn(netdev, "RSS is not configured on this VSI!\n");
@@ -3896,7 +3861,7 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
if (err)
dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
kfree(lut);
return err;
@@ -4766,12 +4731,10 @@ static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
}
static const struct ethtool_ops ice_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.supported_input_xfrm = RXH_XFRM_SYM_XOR,
- .rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_fec_stats = ice_get_fec_stats,
@@ -4784,6 +4747,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ice_get_link_ext_stats,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_coalesce = ice_get_coalesce,
@@ -4806,6 +4770,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_rxfh_fields = ice_get_rxfh_fields,
+ .set_rxfh_fields = ice_set_rxfh_fields,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ice_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index ed95072ca6e3..363ae79a3620 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3043,16 +3043,16 @@ ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
* the ID value used here.
*/
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap)
{
- u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- u8 byte = 0;
- u8 prof_id;
int status;
+ u8 prof_id;
+ u16 ptype;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -3102,57 +3102,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
- /* Examine 8 bits per byte */
- for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
-
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
- /* If PTG is already added, skip and continue */
- if (test_bit(ptg, ptgs_used))
- continue;
+ set_bit(ptg, ptgs_used);
+		/* Check to see if there are any attributes for this ptype, and
+		 * add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
+ if (status == -ENOSPC)
+ break;
+ if (status) {
+			/* This is simply a ptype/PTG with no attribute */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- __set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for
- * this PTYPE, and add them if found.
- */
- status = ice_add_prof_attrib(prof, ptg, ptype,
- attr, attr_cnt);
- if (status == -ENOSPC)
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
- if (status) {
- /* This is simple a PTYPE/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >=
- ICE_MAX_PTG_PER_PROFILE)
- break;
- }
}
-
- bytes--;
- byte++;
}
list_add(&prof->list, &hw->blk[blk].es.prof_map);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 28b0897adf32..ee5d9f9c9d53 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -39,9 +39,10 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d97b751052f2..6d5c939dc8a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1421,7 +1421,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, symm, true);
if (status) {
@@ -1617,7 +1617,7 @@ ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
break;
}
- status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ status = ice_add_prof(hw, blk, id, prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false, false);
if (status)
@@ -2573,38 +2573,38 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* convert its values to their appropriate flow L3, L4 values.
*/
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
- ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP))
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
- ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP))
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
* @vsi: VF's VSI
- * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ * @avf_hash: hash bit fields (LIBIE_FILTER_PCTYPE_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
@@ -2621,8 +2621,7 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
return -EINVAL;
vsi_handle = vsi->idx;
- if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!avf_hash || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
/* Make sure no unsupported bits are specified */
@@ -2658,11 +2657,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV4 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
}
} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
@@ -2679,11 +2678,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV6 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 6cb7bb879c98..52f906d89eca 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include <linux/net/intel/libie/pctype.h>
+
#include "ice_flex_type.h"
#include "ice_parser.h"
@@ -264,57 +266,27 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
-/* Flow headers and fields for AVF support */
-enum ice_flow_avf_hdr_field {
- /* Values 0 - 28 are reserved for future use */
- ICE_AVF_FLOW_FIELD_INVALID = 0,
- ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV4_TCP,
- ICE_AVF_FLOW_FIELD_IPV4_SCTP,
- ICE_AVF_FLOW_FIELD_IPV4_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV4,
- /* Values 37-38 are reserved */
- ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV6_TCP,
- ICE_AVF_FLOW_FIELD_IPV6_SCTP,
- ICE_AVF_FLOW_FIELD_IPV6_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV6,
- ICE_AVF_FLOW_FIELD_RSVD47,
- ICE_AVF_FLOW_FIELD_FCOE_OX,
- ICE_AVF_FLOW_FIELD_FCOE_RX,
- ICE_AVF_FLOW_FIELD_FCOE_OTHER,
- /* Values 51-62 are reserved */
- ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
- ICE_AVF_FLOW_FIELD_MAX
-};
-
/* Supported RSS offloads This macro is defined to support
- * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
+ * VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS ops. PF driver sends the RSS hardware
* capabilities to the caller of this ops.
*/
-#define ICE_DEFAULT_RSS_HENA ( \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+#define ICE_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
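The renamed ICE_DEFAULT_RSS_HASHCFG bitmap is consumed as-is by ice_add_avf_rss_cfg(), which now takes bits keyed by the shared LIBIE_FILTER_PCTYPE_* values instead of the removed ice_flow_avf_hdr_field enum. A minimal caller sketch, modelled on ice_vsi_set_vf_rss_flow_fld() later in this diff; narrowing the bitmap to TCP flows is purely illustrative and the helper name is hypothetical:

/* Sketch: program a reduced RSS hash configuration for a VF VSI.
 * Uses the ice_add_avf_rss_cfg() signature shown in this diff; the
 * TCP-only selection below is an example, not driver code.
 */
static int ice_vf_rss_tcp_only(struct ice_pf *pf, struct ice_vsi *vsi)
{
	u64 hashcfg = BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) |
		      BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);

	/* Bits the hardware does not support are rejected by the
	 * unsupported-bits check near the top of ice_add_avf_rss_cfg().
	 */
	return ice_add_avf_rss_cfg(&pf->hw, vsi, hashcfg);
}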
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 70c201f569ce..d86db081579f 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -68,7 +68,7 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
if (status) {
dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -257,7 +257,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
if (status) {
dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -299,7 +299,8 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
u32 completion_offset;
int err;
@@ -313,7 +314,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (err) {
dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
module, block_size, offset, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
}
@@ -333,11 +334,12 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
- completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
- completion_offset |= desc->params.nvm.offset_high << 16;
+ completion_offset = le16_to_cpu(cmd->offset_low);
+ completion_offset |= cmd->offset_high << 16;
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
@@ -356,7 +358,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (completion_retval) {
dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
module, block_size, offset,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
}
@@ -369,7 +371,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
- *reset_level = desc->params.nvm.cmd_flags &
+ *reset_level = cmd->cmd_flags &
ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level);
@@ -487,7 +489,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
struct devlink *devlink;
int err;
@@ -503,7 +506,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
if (err) {
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
component, module, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
goto out_notify_devlink;
@@ -518,7 +521,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) {
@@ -532,7 +536,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
if (completion_retval) {
dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
component, module,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
err = -EIO;
goto out_notify_devlink;
@@ -579,7 +583,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) {
dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
@@ -611,7 +615,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
return -EIO;
}
@@ -949,7 +953,7 @@ ice_cancel_pending_update(struct ice_pf *pf, const char *component,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
@@ -1042,7 +1046,7 @@ int ice_devlink_flash_update(struct devlink *devlink,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
index 4fd15387a7e5..a31bb026ad34 100644
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -240,7 +240,7 @@ ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
{
struct ice_aqc_fw_log_cfg_resp *fw_modules;
struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
int i;
@@ -255,9 +255,9 @@ ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- cmd = &desc.params.fw_log;
+ cmd = libie_aq_raw(&desc);
cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
@@ -309,7 +309,7 @@ static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
struct ice_aqc_fw_log_cfg_resp *fw_modules;
struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 module_id_cnt;
int status;
void *buf;
@@ -322,7 +322,7 @@ static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
return -ENOMEM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
- cmd = &desc.params.fw_log;
+ cmd = libie_aq_raw(&desc);
cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
@@ -384,12 +384,14 @@ int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
*/
static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_fw_log *cmd;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
+ cmd = libie_aq_raw(&desc);
if (reg)
- desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
+ cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
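The same descriptor conversion repeats across this series: the driver-private struct ice_aq_desc and its params union are replaced by the generic struct libie_aq_desc, with the command-specific layout obtained through libie_aq_raw(). A condensed sketch of the resulting pattern, reusing the fwlog opcode from the hunk above (the helper name is hypothetical):

/* Sketch of the libie adminq descriptor pattern used throughout this
 * diff: fill a generic descriptor, then view its payload through the
 * command-specific struct returned by libie_aq_raw().
 */
static int ice_aq_fwlog_unregister_sketch(struct ice_hw *hw)
{
	struct ice_aqc_fw_log *cmd;
	struct libie_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);

	cmd = libie_aq_raw(&desc);
	cmd->cmd_flags = 0;	/* no ICE_AQC_FW_LOG_AQ_REGISTER: unregister */

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}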
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index aa4bfbcf85d2..dd520aa4d1d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -16,6 +16,7 @@
#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
@@ -272,6 +273,8 @@
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4))
+#define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -376,6 +379,15 @@
#define GLNVM_ULD_POR_DONE_1_M BIT(8)
#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
#define GLNVM_ULD_PE_DONE_M BIT(10)
+#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0)
+#define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16)
+#define GLCOMM_QTX_CNTX_CTL_CMD_READ 0
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1
+#define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4))
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
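The new GLCOMM_QTX_CNTX_CTL/GLCOMM_QTX_CNTX_DATA registers expose an indirect Tx queue context interface: a command word is composed from the field masks above and written to the control register, after which the context can be read from the data registers. A sketch of issuing a read, with completion polling omitted and the helper name hypothetical:

/* Sketch: start an indirect Tx queue context read via the registers
 * defined above. Real callers would also poll for command completion
 * before reading GLCOMM_QTX_CNTX_DATA(); that part is omitted here.
 */
static void ice_qtx_cntx_read_start(struct ice_hw *hw, u16 queue_id)
{
	u32 cmd;

	cmd = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M, queue_id) |
	      FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
			 GLCOMM_QTX_CNTX_CTL_CMD_READ) |
	      GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;

	wr32(hw, GLCOMM_QTX_CNTX_CTL, cmd);
}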
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 2410aee59fb2..b1129da72139 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -823,6 +823,48 @@ cp_free:
}
/**
+ * ice_lag_prepare_vf_reset - helper to adjust VF LAG for reset
+ * @lag: lag struct for interface that owns VF
+ *
+ * Context: must be called with the lag_mutex lock held.
+ *
+ * Return: active lport value or ICE_LAG_INVALID_PORT if nothing moved.
+ */
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
+{
+ u8 pri_prt, act_prt;
+
+ if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ }
+
+ return ICE_LAG_INVALID_PORT;
+}
+
+/**
+ * ice_lag_complete_vf_reset - helper to restore LAG config after VF reset
+ * @lag: lag struct for primary interface
+ * @act_prt: which port should be active for lag
+ *
+ * Context: must be called while holding the lag_mutex.
+ */
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
+{
+ u8 pri_prt;
+
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ }
+}
+
+/**
* ice_lag_info_event - handle NETDEV_BONDING_INFO event
* @lag: LAG info struct
* @ptr: opaque data pointer
@@ -1102,7 +1144,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
{
struct ice_aqc_alloc_free_res_elem *buf;
struct ice_aqc_set_port_params *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_len, swid;
int status, i;
@@ -1150,7 +1192,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
else
swid = local_lag->pf->hw.port_info->sw_id;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
@@ -2226,7 +2268,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
struct ice_lag *lag = pf->lag;
struct net_device *tmp_nd;
- if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
+ !lag || !lag->upper_netdev)
return false;
rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index bab2c83142a1..69347d9f986b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -70,4 +70,6 @@ void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag);
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt);
#endif /* _ICE_LAG_H_ */
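The two new helpers are meant to bracket a VF reset on bonded interfaces, per the Context notes in their kernel-doc: move VF nodes to the primary port before the reset and back to the active port afterwards, all under lag_mutex. A sketch of the intended caller shape; the reset placeholder and wrapper name are hypothetical:

/* Sketch: expected usage of the VF-reset LAG helpers. Both calls are
 * made with pf->lag_mutex held, as required by their kernel-doc.
 */
static void ice_reset_vf_with_lag_sketch(struct ice_pf *pf)
{
	struct ice_lag *lag = pf->lag;
	u8 act_prt;

	mutex_lock(&pf->lag_mutex);
	act_prt = ice_lag_prepare_vf_reset(lag);

	/* ... perform the actual VF reset here ... */

	ice_lag_complete_vf_reset(lag, act_prt);
	mutex_unlock(&pf->lag_mutex);
}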
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 03bb16191237..a439b5a61a56 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -484,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
-#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
@@ -1579,7 +1578,7 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
@@ -3200,7 +3199,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
if (!netdev)
return;
- /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */
+ /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
if (vsi->type == ICE_VSI_CHNL)
return;
@@ -3737,20 +3736,20 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
status = ice_aq_set_link_restart_an(pi, ena, NULL);
- /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
+ /* if link is owned by manageability, FW will return LIBIE_AQ_RC_EMODE.
* this is not a fatal error, so print a warning message and return
* a success code. Return an error if FW returns an error code other
- * than ICE_AQ_RC_EMODE
+ * than LIBIE_AQ_RC_EMODE
*/
if (status == -EIO) {
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -4021,3 +4020,38 @@ ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
vsi->info = ctx.info;
return 0;
}
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 654516c5fc3e..2cb1eb98b9da 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,6 +11,13 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -116,4 +123,5 @@ void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel);
#endif /* !_ICE_LIB_H_ */
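ice_vsi_update_l2tsel() and the ice_l2tsel enum let callers choose which Rx descriptor field receives the first stripped VLAN tag for every queue of a VSI. A small usage sketch; the wrapper and its trigger condition are illustrative only:

/* Sketch: flip VLAN tag placement for all Rx queues of a VSI, e.g.
 * when a VF renegotiates where its outer tag should be delivered.
 */
static void ice_vsi_strip_to_l2tag2(struct ice_vsi *vsi, bool to_l2tag2)
{
	enum ice_l2tsel l2tsel = to_l2tag2 ?
		ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND :
		ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;

	ice_vsi_update_l2tsel(vsi, l2tsel);
}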
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0a11b4281092..8e0b06c1e02b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -38,6 +38,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -379,7 +380,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
!test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
@@ -1119,7 +1120,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (status)
dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
pi->lport, status,
- ice_aq_str(pi->hw->adminq.sq_last_status));
+ libie_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1144,6 +1145,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
+ if (!link_up && old_link)
+ pf->link_down_events++;
+
ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
@@ -4221,7 +4225,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4764,7 +4768,6 @@ int ice_init_dev(struct ice_pf *pf)
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
- pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
pf->hw.udp_tunnel_nic.tables[0].n_entries =
@@ -5429,7 +5432,7 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -5895,6 +5898,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
/* required last entry */
{}
};
@@ -7900,69 +7912,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * ice_eth_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- */
-static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return ice_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return ice_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * ice_aq_str - convert AQ err code to a string
- * @aq_err: the AQ error code to convert
- */
-const char *ice_aq_str(enum ice_aq_err aq_err)
-{
- switch (aq_err) {
- case ICE_AQ_RC_OK:
- return "OK";
- case ICE_AQ_RC_EPERM:
- return "ICE_AQ_RC_EPERM";
- case ICE_AQ_RC_ENOENT:
- return "ICE_AQ_RC_ENOENT";
- case ICE_AQ_RC_ENOMEM:
- return "ICE_AQ_RC_ENOMEM";
- case ICE_AQ_RC_EBUSY:
- return "ICE_AQ_RC_EBUSY";
- case ICE_AQ_RC_EEXIST:
- return "ICE_AQ_RC_EEXIST";
- case ICE_AQ_RC_EINVAL:
- return "ICE_AQ_RC_EINVAL";
- case ICE_AQ_RC_ENOSPC:
- return "ICE_AQ_RC_ENOSPC";
- case ICE_AQ_RC_ENOSYS:
- return "ICE_AQ_RC_ENOSYS";
- case ICE_AQ_RC_EMODE:
- return "ICE_AQ_RC_EMODE";
- case ICE_AQ_RC_ENOSEC:
- return "ICE_AQ_RC_ENOSEC";
- case ICE_AQ_RC_EBADSIG:
- return "ICE_AQ_RC_EBADSIG";
- case ICE_AQ_RC_ESVN:
- return "ICE_AQ_RC_ESVN";
- case ICE_AQ_RC_EBADMAN:
- return "ICE_AQ_RC_EBADMAN";
- case ICE_AQ_RC_EBADBUF:
- return "ICE_AQ_RC_EBADBUF";
- }
-
- return "ICE_AQ_RC_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7987,7 +7936,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8010,7 +7959,7 @@ int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8040,7 +7989,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8063,7 +8012,7 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8180,7 +8129,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
- bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
+ bmode, ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for book keeping */
@@ -8248,7 +8197,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (err) {
netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
mode, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return err;
@@ -9755,7 +9704,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
- .ndo_eth_ioctl = ice_eth_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
@@ -9779,4 +9727,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
};
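With ndo_hwtstamp_get/ndo_hwtstamp_set wired into ice_netdev_ops, the net core handles the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls and the user-space copies itself, which is why the ice_eth_ioctl() shim above could be removed. A stripped-down sketch of the pairing, with all unrelated callbacks elided:

/* Sketch: the minimal netdev_ops wiring a driver needs for hardware
 * timestamping once it uses kernel_hwtstamp_config based callbacks.
 */
static const struct net_device_ops example_hwtstamp_ops = {
	.ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
	.ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
};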
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 59e8879ac059..7e187a804dfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -22,10 +22,10 @@ int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
if (offset > ICE_AQC_NVM_MAX_OFFSET)
return -EINVAL;
@@ -125,10 +125,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
@@ -146,7 +146,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
cmd->offset_high = (offset >> 16) & 0xFF;
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
@@ -161,10 +161,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*/
int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
@@ -869,7 +869,7 @@ static int ice_discover_flash_size(struct ice_hw *hw)
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == -EIO &&
- hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = 0;
@@ -1182,14 +1182,14 @@ int ice_init_nvm(struct ice_hw *hw)
int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
return status;
- cmd = &desc.params.nvm_checksum;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
@@ -1226,11 +1226,11 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
*/
int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- struct ice_aq_desc desc;
int err;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
@@ -1252,7 +1252,7 @@ int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
*/
int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
@@ -1278,15 +1278,15 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pkg_data *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (length != 0 && !data)
return -EINVAL;
- cmd = &desc.params.pkg_data;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (del_pkg_data_flag)
cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
@@ -1316,17 +1316,17 @@ ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!data || !comp_response || !comp_response_code)
return -EINVAL;
- cmd = &desc.params.pass_comp_tbl;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_nvm_pass_component_tbl);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->transfer_flag = transfer_flag;
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 55cad824c5b9..e358eb1d719f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -4,7 +4,6 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
-#include "ice_cgu_regs.h"
static const char ice_pin_names[][64] = {
"SDP0",
@@ -40,21 +39,19 @@ static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};
-static const char ice_pin_names_nvm[][64] = {
- "GNSS",
- "SMA1",
- "U.FL1",
- "SMA2",
- "U.FL2",
+static const char ice_pin_names_dpll[][64] = {
+ "SDP20",
+ "SDP21",
+ "SDP22",
+ "SDP23",
};
-static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
+static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
/* name, gpio, delay */
- { GNSS, { 1, -1 }, { 0, 0 }},
- { SMA1, { 1, 0 }, { 0, 1 }},
- { UFL1, { -1, 0 }, { 0, 1 }},
- { SMA2, { 3, 2 }, { 0, 1 }},
- { UFL2, { 3, -1 }, { 0, 0 }},
+ { SDP0, { -1, 0 }, { 0, 1 }},
+ { SDP1, { 1, -1 }, { 0, 0 }},
+ { SDP2, { -1, 2 }, { 0, 1 }},
+ { SDP3, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -93,101 +90,6 @@ static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
}
/**
- * ice_ptp_update_sma_data - update SMA pins data according to pins setup
- * @pf: Board private structure
- * @sma_pins: parsed SMA pins status
- * @data: SMA data to update
- */
-static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
- u8 *data)
-{
- const char *state1, *state2;
-
- /* Set the right state based on the desired configuration.
- * When bit is set, functionality is disabled.
- */
- *data &= ~ICE_ALL_SMA_MASK;
- if (!sma_pins[UFL1 - 1]) {
- if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
- state1 = "SMA1 Rx, U.FL1 disabled";
- *data |= ICE_SMA1_TX_EN;
- } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
- state1 = "SMA1 Tx U.FL1 disabled";
- *data |= ICE_SMA1_DIR_EN;
- } else {
- state1 = "SMA1 disabled, U.FL1 disabled";
- *data |= ICE_SMA1_MASK;
- }
- } else {
- /* U.FL1 Tx will always enable SMA1 Rx */
- state1 = "SMA1 Rx, U.FL1 Tx";
- }
-
- if (!sma_pins[UFL2 - 1]) {
- if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
- state2 = "SMA2 Rx, U.FL2 disabled";
- *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
- } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
- state2 = "SMA2 Tx, U.FL2 disabled";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
- } else {
- state2 = "SMA2 disabled, U.FL2 disabled";
- *data |= ICE_SMA2_MASK;
- }
- } else {
- if (!sma_pins[SMA2 - 1]) {
- state2 = "SMA2 disabled, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
- } else {
- state2 = "SMA2 Tx, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN;
- }
- }
-
- dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
-}
-
-/**
- * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
- * @pf: Board private structure
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
-{
- const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
- struct ptp_pin_desc *pins = pf->ptp.pin_desc;
- unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
- int err;
- u8 data;
-
- /* Read initial pin state value */
- err = ice_read_sma_ctrl(&pf->hw, &data);
- if (err)
- return err;
-
- /* Get SMA/U.FL pins states */
- for (int i = 0; i < pf->ptp.info.n_pins; i++)
- if (pins[i].func) {
- int name_idx = ice_pins[i].name_idx;
-
- switch (name_idx) {
- case SMA1:
- case UFL1:
- case SMA2:
- case UFL2:
- sma_pins[name_idx - 1] = pins[i].func;
- break;
- default:
- continue;
- }
- }
-
- ice_ptp_update_sma_data(pf, sma_pins, &data);
- return ice_write_sma_ctrl(&pf->hw, data);
-}
-
-/**
* ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
* @pf: Board private structure
*
@@ -1734,7 +1636,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
int err;
/* Enable/disable CGU 1PPS output for E825C */
- err = ice_cgu_cfg_pps_out(hw, !!period);
+ err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
if (err)
return err;
}
@@ -1879,63 +1781,6 @@ static void ice_ptp_enable_all_perout(struct ice_pf *pf)
}
/**
- * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
- * @pf: Board private structure
- * @pin: Pin index
- * @func: Assigned function
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
- enum ptp_pin_function func)
-{
- unsigned int gpio_pin;
-
- switch (func) {
- case PTP_PF_PEROUT:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
- break;
- case PTP_PF_EXTTS:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
- struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
- unsigned int chan = pin_desc->chan;
-
- /* Skip pin idx from the request */
- if (i == pin)
- continue;
-
- if (pin_desc->func == PTP_PF_PEROUT &&
- pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
- pf->ptp.perout_rqs[chan].period.sec = 0;
- pf->ptp.perout_rqs[chan].period.nsec = 0;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
- false);
- } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS &&
- pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
- pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
- false);
- }
- }
-
- return 0;
-}
-
-/**
* ice_verify_pin - verify if pin supports requested pin function
* @info: the driver's PTP info structure
* @pin: Pin index
@@ -1969,14 +1814,6 @@ static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
return -EOPNOTSUPP;
}
- /* On adapters with SMA_CTRL disable other pins that share same GPIO */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ice_ptp_disable_shared_pin(pf, pin, func);
- pf->ptp.pin_desc[pin].func = func;
- pf->ptp.pin_desc[pin].chan = chan;
- return ice_ptp_set_sma_cfg(pf);
- }
-
return 0;
}
@@ -2360,23 +2197,24 @@ static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
}
/**
- * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_get - interface to read the timestamping config
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
*
* Copy the timestamping config to user buffer
*/
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
- config = &pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2384,8 +2222,8 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
*/
-static int
-ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
+static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -2429,32 +2267,32 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
}
/**
- * ice_ptp_set_ts_config - ioctl interface to control the timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_set - interface to control the timestamping
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Get the user config and store it
*/
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
int err;
if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ice_ptp_set_timestamp_mode(pf, &config);
+ err = ice_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* Return the actual configuration set */
- config = pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2500,14 +2338,14 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
- const char *name = NULL;
+ const char *name;
if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
name = ice_pin_names[desc->name_idx];
- else if (desc->name_idx != GPIO_NA)
- name = ice_pin_names_nvm[desc->name_idx];
- if (name)
- strscpy(pin->name, name, sizeof(pin->name));
+ else
+ name = ice_pin_names_dpll[desc->name_idx];
+
+ strscpy(pin->name, name, sizeof(pin->name));
pin->index = i;
}
@@ -2519,8 +2357,8 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
* ice_ptp_disable_pins - Disable PTP pins
* @pf: pointer to the PF structure
*
- * Disable the OS access to the SMA pins. Called to clear out the OS
- * indications of pin support when we fail to setup the SMA control register.
+ * Disable the OS access to the pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the pin array.
*/
static void ice_ptp_disable_pins(struct ice_pf *pf)
{
@@ -2561,40 +2399,30 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
for (i = 0; i < num_entries; i++) {
u16 entry = le16_to_cpu(entries[i]);
DECLARE_BITMAP(bitmap, GPIO_NA);
- unsigned int bitmap_idx;
+ unsigned int idx;
bool dir;
u16 gpio;
*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
+
+ /* Check if entry's pin bitmap is valid. */
+ if (bitmap_empty(bitmap, GPIO_NA))
+ continue;
+
dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
- for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
- unsigned int idx;
- /* Check if entry's pin bit is valid */
- if (bitmap_idx >= NUM_PTP_PINS_NVM &&
- bitmap_idx != GPIO_NA)
- continue;
-
- /* Check if pin already exists */
- for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
- if (pins[idx].name_idx == bitmap_idx)
- break;
-
- if (idx == ICE_N_PINS_MAX) {
- /* Pin not found, setup its entry and name */
- idx = n_pins++;
- pins[idx].name_idx = bitmap_idx;
- if (bitmap_idx == GPIO_NA)
- strscpy(pf->ptp.pin_desc[idx].name,
- ice_pin_names[gpio],
- sizeof(pf->ptp.pin_desc[idx]
- .name));
- }
+ for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
+ if (pins[idx].name_idx == gpio)
+ break;
+ }
- /* Setup in/out GPIO number */
- pins[idx].gpio[dir] = gpio;
+ if (idx == ICE_N_PINS_MAX) {
+ /* Pin not found, setup its entry and name */
+ idx = n_pins++;
+ pins[idx].name_idx = gpio;
}
+ pins[idx].gpio[dir] = gpio;
}
for (i = 0; i < n_pins; i++) {
@@ -2622,10 +2450,10 @@ static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
}
ice_ptp_setup_pin_cfg(pf);
}
@@ -2651,15 +2479,13 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
if (err) {
/* SDP section does not exist in NVM or is corrupted */
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ptp->ice_pin_desc = ice_pin_desc_e810_sma;
- ptp->info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
+ ptp->ice_pin_desc = ice_pin_desc_dpll;
+ ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
- pf->ptp.info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
- err = 0;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
}
+ err = 0;
} else {
desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
sizeof(struct ice_ptp_pin_desc),
@@ -2677,8 +2503,6 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
ptp->info.pin_config = ptp->pin_desc;
ice_ptp_setup_pin_cfg(pf);
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
- err = ice_ptp_set_sma_cfg(pf);
err:
if (err) {
devm_kfree(ice_pf_to_dev(pf), desc);
@@ -2704,7 +2528,7 @@ static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
#endif /* CONFIG_ICE_HWTS */
/* Rest of the config is the same as base E810 */
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
ice_ptp_setup_pin_cfg(pf);
}
@@ -3069,6 +2893,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
if (err)
return err;
+ err = ice_tspll_init(hw);
+ if (err)
+ return err;
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -3236,6 +3064,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
return err;
}
+ err = ice_tspll_init(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
+ err);
+ return err;
+ }
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 3b769a0cad00..137f2070a2d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -202,9 +202,6 @@ enum ice_ptp_pin_nvm {
/* Pin definitions for PTP */
#define ICE_N_PINS_MAX 6
-#define ICE_SMA_PINS_NUM 4
-#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \
- sizeof(struct ice_ptp_pin_desc))
/**
* struct ice_ptp_pin_desc - hardware pin description data
@@ -262,7 +259,7 @@ struct ice_ptp {
struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
u64 reset_time;
u32 tx_hwtstamp_skipped;
u32 tx_hwtstamp_timeouts;
@@ -294,8 +291,11 @@ struct ice_ptp {
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
@@ -316,12 +316,16 @@ void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
-static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+
+static inline int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
-static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+static inline int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 003cdfada3ca..19dddd9b53dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -281,7 +281,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
/* struct ice_time_ref_info_e82x
*
- * E822 hardware can use different sources as the reference for the PTP
+ * E82X hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
@@ -289,8 +289,8 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
* reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ] = {
+ /* ICE_TSPLL_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
823437500, /* 823.4375 MHz PLL */
@@ -298,7 +298,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x136e44fabULL,
},
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ /* ICE_TSPLL_FREQ_122_880 -> 122.88 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
@@ -306,7 +306,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x146cc2177ULL,
},
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ /* ICE_TSPLL_FREQ_125_000 -> 125 MHz */
{
/* pll_freq */
796875000, /* 796.875 MHz */
@@ -314,7 +314,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x141414141ULL,
},
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ /* ICE_TSPLL_FREQ_153_600 -> 153.6 MHz */
{
/* pll_freq */
816000000, /* 816 MHz */
@@ -322,7 +322,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x139b9b9baULL,
},
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ /* ICE_TSPLL_FREQ_156_250 -> 156.25 MHz */
{
/* pll_freq */
830078125, /* 830.78125 MHz */
@@ -330,7 +330,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x134679aceULL,
},
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ /* ICE_TSPLL_FREQ_245_760 -> 245.76 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
@@ -339,167 +339,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
},
};
-const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* refclk_pre_div */
- 1,
- /* feedback_div */
- 197,
- /* frac_n_div */
- 2621440,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* refclk_pre_div */
- 10,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-};
-
-const
-struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x19,
- /* tspll_ndivratio */
- 1,
- /* tspll_fbdiv_intgr */
- 320,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x29,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 195,
- /* tspll_fbdiv_frac */
- 1342177280UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x3E,
- /* tspll_ndivratio */
- 2,
- /* tspll_fbdiv_intgr */
- 128,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x33,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 156,
- /* tspll_fbdiv_frac */
- 1073741824UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x1F,
- /* tspll_ndivratio */
- 5,
- /* tspll_fbdiv_intgr */
- 256,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x52,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 97,
- /* tspll_fbdiv_frac */
- 2818572288UL,
- /* ref1588_ck_div */
- 0,
- },
-};
-
/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ccac84eb34c9..35680dbe4a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -6,7 +6,6 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
-#include "ice_cgu_regs.h"
static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
DPLL_PIN_FREQUENCY_1PPS,
@@ -150,7 +149,7 @@ static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
* | 8 bit s | | 32 bits |
* +---------------+ +---------------+
*
- * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
* registers every clock source tick. Depending on the specific device
* configuration, the clock source frequency could be one of a number of
* values.
@@ -226,547 +225,6 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to read from CGU
- */
-static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
- .msg_addr_low = addr
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return 0;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to write to CGU
- */
-static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
- .msg_addr_low = addr,
- .data = val
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Return: specified TIME_REF clock frequency converted to a string
- */
-static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
-{
- switch (clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Return: specified clock source converted to its string name
- */
-static const char *ice_clk_src_str(enum ice_clk_src clk_src)
-{
- switch (clk_src) {
- case ICE_CLK_SRC_TCXO:
- return "TCXO";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.ts_pll_enable) {
- dw24.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_lock_e825c ro_lock;
- union nac_cgu_dword16_e825c dw16;
- union nac_cgu_dword23_e825c dw23;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_156_250) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 156.25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw23.ts_pll_enable) {
- dw23.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
- dw23.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
-
- /* Enable the correct receiver */
- if (clk_src == ICE_CLK_SRC_TCXO) {
- dw9.time_ref_en = 0;
- dw9.clk_eref0_en = 1;
- } else {
- dw9.time_ref_en = 1;
- dw9.clk_eref0_en = 0;
- }
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Choose the referenced frequency */
- dw16.tspll_ck_refclkfreq =
- e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr =
- e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
- dw19.tspll_ndivratio =
- e825c_cgu_params[clk_freq].tspll_ndivratio;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- /* These two are constant for E825C */
- dw22.time1588clk_div = 5;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- dw23.ref1588_ck_div =
- e825c_cgu_params[clk_freq].ref1588_ck_div;
- dw23.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- dw24.tspll_fbdiv_frac =
- e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw23.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- if (!ro_lock.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-#define ICE_ONE_PPS_OUT_AMP_MAX 3
-
-/**
- * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU
- * @hw: pointer to the HW struct
- * @enable: true to enable 1PPS output, false to disable it
- *
- * Return: 0 on success, other negative error code when CGU read/write failed
- */
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable)
-{
- union nac_cgu_dword9 dw9;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- dw9.one_pps_out_en = enable;
- dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
- return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
-{
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.i_plllock_sel_0 = 0;
- cntr_bist.i_plllock_sel_1 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
-{
- union tspll_bw_tdc_e825c bw_tdc;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
- if (err)
- return err;
-
- bw_tdc.i_plllock_sel_1_0 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- *
- * Return: 0 on success, other error codes when failed to read/write/cfg CGU
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- int err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
- err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
- else
- err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
- err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- else
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
-
- return err;
-}
-
-/**
* ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
* @hw: pointer to HW struct
* @cmd: Timer command
@@ -966,7 +424,7 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
};
int err;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -993,7 +451,7 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
};
int err;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -2658,20 +2116,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
- * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform E825-specific PTP hardware clock initialization steps.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int ice_ptp_init_phc_e825(struct ice_hw *hw)
-{
- /* Initialize the Clock Generation Unit */
- return ice_init_cgu_e82x(hw);
-}
-
-/**
* ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
* @hw: pointer to the HW struct
* @ts_status: the timestamp mask pointer
@@ -2904,7 +2348,7 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2982,7 +2426,7 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3143,7 +2587,7 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3178,7 +2622,7 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3330,7 +2774,6 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
*/
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
- int err;
u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
@@ -3340,11 +2783,6 @@ static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
wr32(hw, PF_SB_REM_DEV_CTL, val);
- /* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e82x(hw);
- if (err)
- return err;
-
/* Set window length for all the ports */
return ice_ptp_set_vernier_wl(hw);
}
@@ -4829,7 +4267,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = ice_sbq_dev_phy_0;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -4860,7 +4298,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -6126,7 +5564,7 @@ int ice_ptp_init_phc(struct ice_hw *hw)
case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
case ICE_MAC_GENERIC_3K_E825:
- return ice_ptp_init_phc_e825(hw);
+ return 0;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 83f20fa7ace7..5896b346e579 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -194,23 +194,6 @@ struct ice_eth56g_mac_reg_cfg {
extern
const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
-/**
- * struct ice_cgu_pll_params_e82x - E82X CGU parameters
- * @refclk_pre_div: Reference clock pre-divisor
- * @feedback_div: Feedback divisor
- * @frac_n_div: Fractional divisor
- * @post_pll_div: Post PLL divisor
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF frequency.
- */
-struct ice_cgu_pll_params_e82x {
- u32 refclk_pre_div;
- u32 feedback_div;
- u32 frac_n_div;
- u32 post_pll_div;
-};
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
enum ice_e810_c827_idx {
@@ -282,31 +265,6 @@ struct ice_cgu_pin_desc {
struct dpll_pin_frequency *freq_supp;
};
-extern const struct
-ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
-/**
- * struct ice_cgu_pll_params_e825c - E825C CGU parameters
- * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
- * @tspll_ndivratio: ndiv ratio that goes directly to the pll
- * @tspll_fbdiv_intgr: TS PLL integer feedback divide
- * @tspll_fbdiv_frac: TS PLL fractional feedback divide
- * @ref1588_ck_div: clock divider for tspll ref
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF/TCXO frequency.
- */
-struct ice_cgu_pll_params_e825c {
- u32 tspll_ck_refclkfreq;
- u32 tspll_ndivratio;
- u32 tspll_fbdiv_intgr;
- u32 tspll_fbdiv_frac;
- u32 ref1588_ck_div;
-};
-
-extern const struct
-ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
@@ -314,7 +272,7 @@ ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ];
/* Table of constants for Vernier calibration on E822 */
extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
@@ -328,7 +286,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
@@ -357,7 +314,7 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
+static inline enum ice_tspll_freq ice_e82x_time_ref(const struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
@@ -371,17 +329,17 @@ static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_tspll_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].nominal_incval;
}
@@ -704,6 +662,7 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)
#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \
ICE_SMA2_TX_EN)
+#define ICE_SMA2_INACTIVE_MASK (ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN)
#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK)
#define ICE_SMA_MIN_BIT 3
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index d9d09296d1d4..fff0c1afdb41 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -123,13 +123,13 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.sched_elem_cmd;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && elems_resp)
*elems_resp = le16_to_cpu(cmd->num_elem_resp);
@@ -392,10 +392,10 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
cmd->port_num = lport;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -518,7 +518,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -683,13 +683,13 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.rl_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_profiles = cpu_to_le16(num_profiles);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && num_processed)
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 0e4dc1a5cff0..9ce4c4db400e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -933,7 +933,6 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
bool needs_rebuild = false;
struct ice_vsi *vsi;
struct ice_vf *vf;
- int id;
if (!ice_get_num_vfs(pf))
return -ENOENT;
@@ -952,17 +951,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (msix_vec_count < ICE_MIN_INTR_PER_VF)
return -EINVAL;
- /* Transition of PCI VF function number to function_id */
- for (id = 0; id < pci_num_vf(pdev); id++) {
- if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
- break;
- }
-
- if (id == pci_num_vf(pdev))
- return -ENOENT;
-
- vf = ice_get_vf_by_id(pf, id);
-
+ vf = ice_get_vf_by_dev(pf, vf_dev);
if (!vf)
return -ENOENT;
@@ -972,6 +961,12 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
return -ENOENT;
}
+ /* No need to rebuild if we're setting to the same value */
+ if (msix_vec_count == vf->num_msix) {
+ ice_put_vf(vf);
+ return 0;
+ }
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1166,10 +1161,12 @@ static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
+ struct ice_aqc_event_lan_overflow *cmd;
u32 gldcb_rtctq, queue;
struct ice_vf *vf;
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ cmd = libie_aq_raw(&event->desc);
+ gldcb_rtctq = le32_to_cpu(cmd->prtdcb_ruptq);
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 96549ca5c52c..d1a998a4bef6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -64,6 +64,7 @@ bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -164,5 +165,11 @@ ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
return -EOPNOTSUPP;
}
+
+static inline int ice_vf_vsi_dis_single_txq(struct ice_vf *vf,
+ struct ice_vsi *vsi, u16 q_id)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9d9a7edd3618..84848f0123e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1511,11 +1511,11 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
- cmd = &desc.params.get_sw_conf;
+ cmd = libie_aq_raw(&desc);
cmd->element = cpu_to_le16(*req_desc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -1541,11 +1541,11 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- res = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ res = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
@@ -1556,7 +1556,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1585,11 +1585,11 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
@@ -1620,17 +1620,17 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1944,7 +1944,8 @@ int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_sw_rules *cmd;
+ struct libie_aq_desc desc;
int status;
if (opc != ice_aqc_opc_add_sw_rules &&
@@ -1953,13 +1954,13 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
+ cmd = libie_aq_raw(&desc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- desc.params.sw_rules.num_rules_fltr_entry_index =
- cpu_to_le16(num_rules);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ cmd->num_rules_fltr_entry_index = cpu_to_le16(num_rules);
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
- hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
status = -ENOENT;
if (!status) {
@@ -1989,14 +1990,14 @@ ice_aq_add_recipe(struct ice_hw *hw,
u16 num_recipes, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
cmd->num_sub_recipes = cpu_to_le16(num_recipes);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
buf_size = num_recipes * sizeof(*s_recipe_list);
@@ -2026,14 +2027,14 @@ ice_aq_get_recipe(struct ice_hw *hw,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
return -EINVAL;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
cmd->return_index = cpu_to_le16(recipe_root);
@@ -2118,9 +2119,9 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
/* Set the recipe ID bit in the bitmask to let the device know which
@@ -2144,10 +2145,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
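
The hunks above repeat one mechanical change: command structures are no longer reached through named members of desc.params but through libie_aq_raw(&desc), which hands back the descriptor's command-specific parameter bytes so each opcode can overlay its own struct on them. Below is a minimal userspace sketch of that accessor idea; the struct layouts, field names and the demo_aq_raw() helper are invented for illustration and are not the real libie definitions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for a 32-byte admin queue descriptor: a fixed header
 * followed by a 16-byte command-specific parameter area. The real libie
 * layout differs; this only illustrates the "raw params" accessor idea. */
struct demo_aq_desc {
	uint16_t flags;
	uint16_t opcode;
	uint16_t datalen;
	uint16_t retval;
	uint32_t cookie_high;
	uint32_t cookie_low;
	uint8_t  params[16];
};

/* Hypothetical equivalent of libie_aq_raw(): return the parameter bytes so
 * each opcode can lay its own command struct over them. */
static inline void *demo_aq_raw(struct demo_aq_desc *desc)
{
	return desc->params;
}

/* One opcode-specific view of the parameter area (made up for the demo). */
struct demo_cmd_sched_elem {
	uint16_t num_elem_req;
	uint16_t num_elem_resp;
	uint8_t  reserved[12];
};

int main(void)
{
	struct demo_aq_desc desc;
	struct demo_cmd_sched_elem *cmd;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = 0x0403;            /* arbitrary opcode for the demo */

	cmd = demo_aq_raw(&desc);        /* same shape as cmd = libie_aq_raw(&desc) */
	cmd->num_elem_req = 4;

	printf("opcode=0x%04x num_elem_req=%u\n", desc.opcode, cmd->num_elem_req);
	return 0;
}

The benefit of the accessor is that the descriptor type no longer needs a union member per command; the conversion in these hunks is otherwise behavior-preserving.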
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
new file mode 100644
index 000000000000..66320a4ab86f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_ptp_hw.h"
+
+static const struct
+ice_tspll_params_e82x e82x_tspll_params[NUM_ICE_TSPLL_FREQ] = {
+ [ICE_TSPLL_FREQ_25_000] = {
+ .refclk_pre_div = 1,
+ .post_pll_div = 6,
+ .feedback_div = 197,
+ .frac_n_div = 2621440,
+ },
+ [ICE_TSPLL_FREQ_122_880] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_125_000] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_153_600] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_156_250] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_245_760] = {
+ .refclk_pre_div = 10,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+};
+
+/**
+ * ice_tspll_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string.
+ */
+static const char *ice_tspll_clk_freq_str(enum ice_tspll_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TSPLL_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TSPLL_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TSPLL_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TSPLL_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TSPLL_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TSPLL_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_default_freq - Return default frequency for a MAC type
+ * @mac_type: MAC type
+ *
+ * Return: default TSPLL frequency for a supported MAC type, -ERANGE otherwise.
+ */
+static enum ice_tspll_freq ice_tspll_default_freq(enum ice_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ICE_MAC_GENERIC:
+ return ICE_TSPLL_FREQ_25_000;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_TSPLL_FREQ_156_250;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_check_params - Check if TSPLL params are correct
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF or TCXO)
+ *
+ * Return: true if TSPLL params are correct, false otherwise.
+ */
+static bool ice_tspll_check_params(struct ice_hw *hw,
+ enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ if (clk_freq >= NUM_ICE_TSPLL_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TSPLL frequency %u\n",
+ clk_freq);
+ return false;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return false;
+ }
+
+ if ((hw->mac_type == ICE_MAC_GENERIC_3K_E825 ||
+ clk_src == ICE_CLK_SRC_TCXO) &&
+ clk_freq != ice_tspll_default_freq(hw->mac_type)) {
+ dev_warn(ice_hw_to_dev(hw), "Unsupported frequency for this clock source\n");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_tspll_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_tspll_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_log_cfg - Log current/new TSPLL configuration
+ * @hw: Pointer to the HW struct
+ * @enable: CGU enabled/disabled
+ * @clk_src: Current clock source
+ * @tspll_freq: Current clock frequency
+ * @lock: CGU lock status
+ * @new_cfg: true if this is a new config
+ */
+static void ice_tspll_log_cfg(struct ice_hw *hw, bool enable, u8 clk_src,
+ u8 tspll_freq, bool lock, bool new_cfg)
+{
+ dev_dbg(ice_hw_to_dev(hw),
+ "%s TSPLL configuration -- %s, src %s, freq %s, PLL %s\n",
+ new_cfg ? "New" : "Current", str_enabled_disabled(enable),
+ ice_tspll_clk_src_str((enum ice_clk_src)clk_src),
+ ice_tspll_clk_freq_str((enum ice_tspll_freq)tspll_freq),
+ lock ? "locked" : "unlocked");
+}
+
+/**
+ * ice_tspll_cfg_e82x - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e82x(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r24;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_BWM_LF_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24)) {
+ r24 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ r9 &= ~ICE_CGU_R9_TIME_REF_FREQ_SEL;
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X | ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X,
+ e82x_tspll_params[clk_freq].feedback_div);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO, 1);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV,
+ e82x_tspll_params[clk_freq].post_pll_div);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ r24 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R24_FBDIV_FRAC |
+ ICE_CGU_R23_R24_TIME_REF_SEL);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_REF1588_CK_DIV,
+ e82x_tspll_params[clk_freq].refclk_pre_div);
+ r24 |= FIELD_PREP(ICE_CGU_R24_FBDIV_FRAC,
+ e82x_tspll_params[clk_freq].frac_n_div);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r24 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_BWM_LF_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
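
The function above programs each CGU register with the same read, clear-field, FIELD_PREP, write sequence. For readers less familiar with the bitfield helpers, here is a small self-contained userspace sketch of that idiom; the GENMASK/FIELD_PREP/FIELD_GET one-liners stand in for the kernel's <linux/bits.h> and <linux/bitfield.h> versions (which add compile-time checks), and the DEMO_* masks and values are invented, not the real ICE_CGU_* layout.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel helpers (GCC/Clang builtins). */
#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)  (((r) & (m)) >> __builtin_ctz(m))

/* Hypothetical field layout, for illustration only. */
#define DEMO_FREQ_SEL    GENMASK(2, 0)
#define DEMO_PLL_ENABLE  (1u << 24)

int main(void)
{
	uint32_t reg = 0x012345ff;             /* pretend this was just read  */

	reg &= ~DEMO_FREQ_SEL;                 /* clear the field first       */
	reg |= FIELD_PREP(DEMO_FREQ_SEL, 4);   /* then program the new value  */
	reg |= DEMO_PLL_ENABLE;                /* single-bit flags are OR'ed  */

	printf("reg=0x%08x freq_sel=%u pll_en=%u\n", reg,
	       FIELD_GET(DEMO_FREQ_SEL, reg),
	       FIELD_GET(DEMO_PLL_ENABLE, reg));
	return 0;
}

Clearing the field before OR-ing in the new value is what keeps the read-modify-write safe when the register previously held a different selection, which is exactly why every FIELD_PREP in the function above is preceded by a mask-out of the same field.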
+
+/**
+ * ice_tspll_dis_sticky_bits_e82x - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU.
+ */
+static int ice_tspll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_CNTR_BIST, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 |
+ ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1);
+
+ return ice_write_cgu_reg(hw, ICE_CGU_CNTR_BIST, val);
+}
+
+/**
+ * ice_tspll_cfg_e825c - Configure the TSPLL for E825-C
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e825c(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r23;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_LOCK_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23)) {
+ r23 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+ }
+
+ if (FIELD_GET(ICE_CGU_R9_TIME_SYNC_EN, r9)) {
+ r9 &= ~ICE_CGU_R9_TIME_SYNC_EN;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency and enable the correct receiver */
+ r9 &= ~(ICE_CGU_R9_TIME_REF_FREQ_SEL | ICE_CGU_R9_CLK_EREF0_EN |
+ ICE_CGU_R9_TIME_REF_EN);
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ if (clk_src == ICE_CLK_SRC_TCXO)
+ r9 |= ICE_CGU_R9_CLK_EREF0_EN;
+ else
+ r9 |= ICE_CGU_R9_TIME_REF_EN;
+ r9 |= ICE_CGU_R9_TIME_SYNC_EN;
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Choose the referenced frequency */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R16, &val);
+ if (err)
+ return err;
+ val &= ~ICE_CGU_R16_TSPLL_CK_REFCLKFREQ;
+ val |= FIELD_PREP(ICE_CGU_R16_TSPLL_CK_REFCLKFREQ,
+ ICE_TSPLL_CK_REFCLKFREQ_E825);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R16, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 |
+ ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825,
+ ICE_TSPLL_FBDIV_INTGR_E825);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO,
+ ICE_TSPLL_NDIVRATIO_E825);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor, these two are constant */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV, 5);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor (constant) and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ r23 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R23_R24_TIME_REF_SEL);
+ r23 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Clear the R24 register. */
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, 0);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r23 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_LOCK_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e825c - disable TSPLL sticky bits for E825-C
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU.
+ */
+static int ice_tspll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_BW_TDC, &val);
+ if (err)
+ return err;
+
+ val &= ~ICE_CGU_BW_TDC_PLLLOCK_SEL;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_BW_TDC, val);
+}
+
+/**
+ * ice_tspll_cfg_pps_out_e825c - Enable/disable 1PPS output and set amplitude
+ * @hw: pointer to the HW struct
+ * @enable: true to enable 1PPS output, false to disable it
+ *
+ * Return: 0 on success, other negative error code when CGU read/write failed.
+ */
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R9_ONE_PPS_OUT_EN | ICE_CGU_R9_ONE_PPS_OUT_AMP);
+ val |= FIELD_PREP(ICE_CGU_R9_ONE_PPS_OUT_EN, enable) |
+ ICE_CGU_R9_ONE_PPS_OUT_AMP;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
+}
+
+/**
+ * ice_tspll_cfg - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the TSPLL which drives the PTP hardware clock.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type, other negative error
+ * codes when failed to configure CGU.
+ */
+static int ice_tspll_cfg(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_cfg_e82x(hw, clk_freq, clk_src);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_cfg_e825c(hw, clk_freq, clk_src);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_dis_sticky_bits - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type.
+ */
+static int ice_tspll_dis_sticky_bits(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_dis_sticky_bits_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_dis_sticky_bits_e825c(hw);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_init - Initialize TSPLL with settings from firmware
+ * @hw: Pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E82X/E825 device.
+ *
+ * Return: 0 on success, other error codes when failed to read/write/cfg CGU.
+ */
+int ice_tspll_init(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ enum ice_tspll_freq tspll_freq;
+ enum ice_clk_src clk_src;
+ int err;
+
+ /* Only E822, E823 and E825 products support TSPLL */
+ if (hw->mac_type != ICE_MAC_GENERIC &&
+ hw->mac_type != ICE_MAC_GENERIC_3K_E825)
+ return 0;
+
+ tspll_freq = (enum ice_tspll_freq)ts_info->time_ref;
+ clk_src = (enum ice_clk_src)ts_info->clk_src;
+ if (!ice_tspll_check_params(hw, tspll_freq, clk_src))
+ return -EINVAL;
+
+ /* Disable sticky lock detection so lock status reported is accurate */
+ err = ice_tspll_dis_sticky_bits(hw);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to predefined frequency. Retrying with fallback frequency.\n");
+
+ /* Try to lock to internal TCXO as a fallback. */
+ tspll_freq = ice_tspll_default_freq(hw->mac_type);
+ clk_src = ICE_CLK_SRC_TCXO;
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err)
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to fallback frequency.\n");
+ }
+
+ return err;
+}
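
The e82x_tspll_params table near the top of this file encodes one divider set per TIME_REF frequency. A quick way to sanity-check the entries is to compute the resulting PLL rate. The sketch below assumes the usual fractional-N relation pll = (time_ref / refclk_pre_div) * (feedback_div + frac_n_div / 2^22) / post_pll_div; the 2^22 scaling of the fractional word is an inference, not something stated in the patch, so treat the printed figures accordingly.

#include <stdio.h>

struct demo_tspll_params {
	unsigned int refclk_pre_div;
	unsigned int post_pll_div;
	unsigned int feedback_div;
	unsigned int frac_n_div;
	double time_ref_hz;
};

int main(void)
{
	/* Divider values copied from e82x_tspll_params, paired with their
	 * TIME_REF input frequencies. */
	static const struct demo_tspll_params p[] = {
		{  1, 6, 197, 2621440,  25000000.0 },
		{  5, 7, 223,  524288, 122880000.0 },
		{  5, 7, 223,  524288, 125000000.0 },
		{  5, 6, 159, 1572864, 153600000.0 },
		{  5, 6, 159, 1572864, 156250000.0 },
		{ 10, 7, 223,  524288, 245760000.0 },
	};

	for (unsigned int i = 0; i < sizeof(p) / sizeof(p[0]); i++) {
		/* Assumed relation: fractional feedback word scaled by 2^22. */
		double fb = p[i].feedback_div + p[i].frac_n_div / 4194304.0;
		double pll = p[i].time_ref_hz / p[i].refclk_pre_div * fb /
			     p[i].post_pll_div;

		printf("TIME_REF %.2f MHz -> PLL %.6f MHz\n",
		       p[i].time_ref_hz / 1e6, pll / 1e6);
	}
	return 0;
}

Under that assumption all six rows land in the same roughly 783-830 MHz band, which is consistent with the dividers being chosen to keep the PTP hardware clock near a single nominal rate regardless of which TIME_REF is wired up.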
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
new file mode 100644
index 000000000000..c0b1232cc07c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _ICE_TSPLL_H_
+#define _ICE_TSPLL_H_
+
+/**
+ * struct ice_tspll_params_e82x - E82X TSPLL parameters
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @post_pll_div: Post PLL divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_tspll_params_e82x {
+ u8 refclk_pre_div;
+ u8 post_pll_div;
+ u8 feedback_div;
+ u32 frac_n_div;
+};
+
+#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
+#define ICE_TSPLL_NDIVRATIO_E825 5
+#define ICE_TSPLL_FBDIV_INTGR_E825 256
+
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
+int ice_tspll_init(struct ice_hw *hw);
+
+#endif /* _ICE_TSPLL_H_ */
#endif /* _ICE_TSPLL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 0e5107fe62ad..29e0088ab6b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -20,7 +20,6 @@
#define ICE_RX_HDR_SIZE 256
-#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
/**
@@ -707,6 +706,37 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
}
/**
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
+ */
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 ntu = rx_ring->next_to_use;
+
+ if (!count)
+ return;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+
+ do {
+ rx_desc++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
+
+ rx_desc->wb.status_error0 = 0;
+ count--;
+ } while (count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+}
+
+/**
* ice_alloc_rx_bufs - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
@@ -726,8 +756,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
- !cleaned_count)
+ if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
@@ -1184,6 +1213,45 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
/**
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
+ *
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
+ */
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean;
+ unsigned int total_rx_pkts = 0;
+ u32 cnt = rx_ring->count;
+
+ while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 stat_err_bits;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ break;
+
+ dma_rmb();
+
+ if (ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
+
+ if (++ntc == cnt)
+ ntc = 0;
+ total_rx_pkts++;
+ }
+
+ rx_ring->first_desc = ntc;
+ rx_ring->next_to_clean = ntc;
+ ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+}
+
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1195,7 +1263,7 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
@@ -1242,17 +1310,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
dma_rmb();
ice_trace(clean_rx_irq, rx_ring, rx_desc);
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf)
- ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (++ntc == cnt)
- ntc = 0;
- rx_ring->first_desc = ntc;
- continue;
- }
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a4b1e9514632..fef750c5f288 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -491,6 +491,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
union ice_32b_rx_flex_desc;
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
@@ -506,6 +507,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
#endif /* _ICE_TXRX_H_ */
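
Both ice_init_ctrl_rx_descs() and ice_clean_ctrl_rx_irq() above rely on the same ring bookkeeping: indices advance with wrap-around at ring->count, and the refill amount is derived from next_to_use/next_to_clean. The sketch below shows that arithmetic under the conventional "keep one slot empty" rule; the helper names are invented and the driver's ICE_RX_DESC_UNUSED macro may be written differently even if it computes the same thing.

#include <stdio.h>
#include <assert.h>

/* Advance a ring index with wrap-around, mirroring "if (++ntc == cnt) ntc = 0". */
static unsigned int ring_advance(unsigned int idx, unsigned int count)
{
	return ++idx == count ? 0 : idx;
}

/* Free slots in a producer/consumer ring that keeps one slot empty so that
 * next_to_use == next_to_clean unambiguously means "empty". */
static unsigned int ring_unused(unsigned int ntu, unsigned int ntc,
				unsigned int count)
{
	unsigned int in_use = (ntu + count - ntc) % count;

	return count - 1 - in_use;
}

int main(void)
{
	unsigned int count = 8, ntu = 6, ntc = 2;

	/* Descriptors 2..5 are posted and pending, so with one reserved slot
	 * there is room to post 3 more. */
	assert(ring_unused(ntu, ntc, count) == 3);

	/* Wrap-around: advancing from the last index returns to 0. */
	assert(ring_advance(count - 1, count) == 0);

	printf("unused=%u next=%u\n", ring_unused(ntu, ntc, count),
	       ring_advance(ntu, count));
	return 0;
}

The same bookkeeping underlies ice_alloc_rx_bufs(); the control-VSI helpers added here differ only in what gets written into each descriptor before the tail is bumped with ice_release_rx_desc().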
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 3d68f465952d..03c6c271865d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -326,17 +326,17 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
/* TIME_REF clock rate specification */
-enum ice_time_ref_freq {
- ICE_TIME_REF_FREQ_25_000 = 0,
- ICE_TIME_REF_FREQ_122_880 = 1,
- ICE_TIME_REF_FREQ_125_000 = 2,
- ICE_TIME_REF_FREQ_153_600 = 3,
- ICE_TIME_REF_FREQ_156_250 = 4,
- ICE_TIME_REF_FREQ_245_760 = 5,
+enum ice_tspll_freq {
+ ICE_TSPLL_FREQ_25_000 = 0,
+ ICE_TSPLL_FREQ_122_880 = 1,
+ ICE_TSPLL_FREQ_125_000 = 2,
+ ICE_TSPLL_FREQ_153_600 = 3,
+ ICE_TSPLL_FREQ_156_250 = 4,
+ ICE_TSPLL_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ,
+ NUM_ICE_TSPLL_FREQ,
- ICE_TIME_REF_FREQ_INVALID = -1,
+ ICE_TSPLL_FREQ_INVALID = -1,
};
/* Clock source specification */
@@ -349,7 +349,7 @@ enum ice_clk_src {
struct ice_ts_func_info {
/* Function specific info */
- enum ice_time_ref_freq time_ref;
+ enum ice_tspll_freq time_ref;
u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 48cd533e93b7..5ee74f3e82dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -859,16 +859,13 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
+ u8 act_prt;
bool rsd;
dev = ice_pf_to_dev(pf);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -884,16 +881,8 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
else
lockdep_assert_held(&vf->cfg_lock);
- lag = pf->lag;
mutex_lock(&pf->lag_mutex);
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
@@ -979,9 +968,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_reset_vf_mbx_cnt(vf);
out_unlock:
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
if (flags & ICE_VF_RESET_LOCK)
@@ -1022,6 +1009,9 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
vf->num_msix = vfs->num_msix_per;
vf->num_vf_qs = vfs->num_qps_per;
+ /* set default RSS hash configuration */
+ vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 482f4285fd35..ffe1f9f830ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -106,8 +106,7 @@ struct ice_vf {
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
+ u64 rss_hashcfg; /* RSS hash configuration */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
@@ -126,10 +125,14 @@ struct ice_vf {
u8 link_up:1; /* only valid if VF link is forced */
u8 lldp_tx_ena:1;
+ u16 num_msix; /* num of MSI-X configured on this VF */
+
u32 ptp_caps;
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned long vf_caps; /* VF's adv. capabilities */
@@ -154,7 +157,6 @@ struct ice_vf {
u16 lldp_recipe_id;
u16 lldp_rule_id;
- u16 num_msix; /* num of MSI-X configured on this VF */
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -237,6 +239,18 @@ static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
#ifdef CONFIG_PCI_IOV
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ int vf_id = pci_iov_vf_id(vf_dev);
+
+ if (vf_id < 0)
+ return NULL;
+
+ return ice_get_vf_by_id(pf, pci_iov_vf_id(vf_dev));
+}
+
void ice_put_vf(struct ice_vf *vf);
bool ice_has_vfs(struct ice_pf *pf);
u16 ice_get_num_vfs(struct ice_pf *pf);
@@ -263,6 +277,12 @@ static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
return NULL;
}
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ return NULL;
+}
+
static inline void ice_put_vf(struct ice_vf *vf)
{
}
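[Editorial note, not part of the patch] ice_get_vf_by_dev() wraps pci_iov_vf_id() so callers can go straight from a VF's pci_dev to its ice_vf. Like ice_get_vf_by_id(), it returns a reference-counted pointer that must be released with ice_put_vf(). A hypothetical caller, for illustration only:

/* Illustration only; example_handle_vf_dev() is not part of the driver. */
static int example_handle_vf_dev(struct ice_pf *pf, struct pci_dev *vf_dev)
{
	struct ice_vf *vf;

	vf = ice_get_vf_by_dev(pf, vf_dev);
	if (!vf)
		return -ENODEV;

	/* ... operate on the VF here ... */

	ice_put_vf(vf);
	return 0;
}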
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index 75c8113e58ee..7798a5d4bc9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -23,18 +23,18 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
- cmd = &desc.params.virt;
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index eeeb9968e477..257967273079 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -304,10 +304,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != LIBIE_AQ_RC_ENOSYS) {
dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
vf->vf_id, aq_ret,
- ice_aq_str(pf->hw.mailboxq.sq_last_status));
+ libie_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -852,7 +852,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
if (status) {
dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
} else {
vsi->info.q_opt_rss = ctx->info.q_opt_rss;
@@ -1427,7 +1427,7 @@ static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
* @vsi: VSI of the VF to configure
* @q_idx: VF queue index used to determine the queue in the PF's space
*/
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
struct ice_hw *hw = &vsi->back->hw;
u32 pfq = vsi->txq_map[q_idx];
@@ -1450,7 +1450,7 @@ static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
* @vsi: VSI of the VF to configure
* @q_idx: VF queue index used to determine the queue in the PF's space
*/
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
struct ice_hw *hw = &vsi->back->hw;
u32 pfq = vsi->rxq_map[q_idx];
@@ -1566,8 +1566,7 @@ error_param:
* disabled then clear q_id bit in the enabled queues bitmap and return
* success. Otherwise return error.
*/
-static int
-ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
{
struct ice_txq_meta txq_meta = { 0 };
struct ice_tx_ring *ring;
@@ -1997,24 +1996,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
int i = -1, q_idx;
bool ena_ts;
+ u8 act_prt;
- lag = pf->lag;
mutex_lock(&pf->lag_mutex);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -2142,9 +2130,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
/* send the response to the VF */
@@ -2161,9 +2147,7 @@ error_param:
vf->vf_id, i);
}
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
ice_lag_move_new_vf_nodes(vf);
@@ -2621,7 +2605,7 @@ static bool ice_vf_vlan_offload_ena(u32 caps)
* ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
* @vf: VF used to determine if VLAN promiscuous config is allowed
*/
-static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
@@ -2640,8 +2624,8 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
* This function should only be called if VLAN promiscuous mode is allowed,
* which can be determined via ice_is_vlan_promisc_allowed().
*/
-static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
- struct ice_vlan *vlan)
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
{
u8 promisc_m = 0;
int status;
@@ -2999,13 +2983,13 @@ error_param:
}
/**
- * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
* @vf: pointer to the VF info
*/
-static int ice_vc_get_rss_hena(struct ice_vf *vf)
+static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
int len = 0, ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -3019,7 +3003,7 @@ static int ice_vc_get_rss_hena(struct ice_vf *vf)
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
@@ -3027,23 +3011,23 @@ static int ice_vc_get_rss_hena(struct ice_vf *vf)
goto err;
}
- vrh->hena = ICE_DEFAULT_RSS_HENA;
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
err:
/* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
(u8 *)vrh, len);
kfree(vrh);
return ret;
}
/**
- * ice_vc_set_rss_hena - set RSS HENA bits for the VF
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*/
-static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
+static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -3074,9 +3058,9 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
* disable RSS
*/
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hena) {
+ if (status && !vrh->hashcfg) {
/* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hena = 0)
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
*/
v_ret = ice_err_to_virt_err(status);
goto err;
@@ -3089,14 +3073,18 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
vf->vf_id);
}
- if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
v_ret = ice_err_to_virt_err(status);
}
+ /* save the requested VF configuration */
+ if (!v_ret)
+ vf->rss_hashcfg = vrh->hashcfg;
+
/* send the response to the VF */
err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
NULL, 0);
}
@@ -3856,48 +3844,6 @@ ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
return 0;
}
-#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
-#define ICE_L2TSEL_BIT_OFFSET 23
-enum ice_l2tsel {
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
-};
-
-/**
- * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
- * @vsi: VSI used to update l2tsel on
- * @l2tsel: l2tsel setting requested
- *
- * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
- * This will modify which descriptor field the first offloaded VLAN will be
- * stripped into.
- */
-static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 l2tsel_bit;
- int i;
-
- if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
- l2tsel_bit = 0;
- else
- l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
-
- for (i = 0; i < vsi->alloc_rxq; i++) {
- u16 pfq = vsi->rxq_map[i];
- u32 qrx_context_offset;
- u32 regval;
-
- qrx_context_offset =
- QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
-
- regval = rd32(hw, qrx_context_offset);
- regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
- regval |= l2tsel_bit;
- wr32(hw, qrx_context_offset, regval);
- }
-}
-
/**
* ice_vc_ena_vlan_stripping_v2_msg
* @vf: VF the message was received from
@@ -4243,8 +4189,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4380,8 +4326,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4582,11 +4528,11 @@ error_handler:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
err = ops->query_rxdid(vf);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- err = ops->get_rss_hena(vf);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ err = ops->get_rss_hashcfg(vf);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- err = ops->set_rss_hena_msg(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ err = ops->set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 222990f229d5..71bb456e2d71 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -57,8 +57,8 @@ struct ice_virtchnl_ops {
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*query_rxdid)(struct ice_vf *vf);
- int (*get_rss_hena)(struct ice_vf *vf);
- int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_rss_hashcfg)(struct ice_vf *vf);
+ int (*set_rss_hashcfg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
@@ -92,12 +92,31 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata);
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan);
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+
+static inline int ice_vf_ena_vlan_promisc(struct ice_vf *vf,
+ struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ return false;
+}
static inline int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index a3d1579a619a..4c2ec2337b38 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -65,7 +65,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, VIRTCHNL_OP_SET_RSS_HASHCFG,
VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1279c1ffe31c..fb526cb84776 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -63,7 +63,7 @@ static int
ice_aq_get_vlan_mode(struct ice_hw *hw,
struct ice_aqc_get_vlan_mode *get_params)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!get_params)
return -EINVAL;
@@ -275,7 +275,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
struct ice_aqc_set_vlan_mode *set_params)
{
u8 rdma_packet, mng_vlan_prot_id;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!set_params)
return -EINVAL;
@@ -295,7 +295,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_set_vlan_mode_parameters);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 5291f2888ef8..ada78f83b3ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -113,7 +113,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -169,7 +169,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ ena, err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -258,7 +258,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -306,7 +306,7 @@ int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret)
dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return ret;
@@ -353,7 +353,7 @@ static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -497,7 +497,7 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -544,7 +544,7 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -604,7 +604,7 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -654,7 +654,7 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -720,7 +720,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -782,7 +782,7 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return err;
@@ -830,7 +830,7 @@ int ice_vsi_clear_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan =
ctxt->info.port_based_outer_vlan;
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 83ac5e296382..4ef4b2b5e37a 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -10,6 +10,7 @@ idpf-y := \
idpf_controlq_setup.o \
idpf_dev.o \
idpf_ethtool.o \
+ idpf_idc.o \
idpf_lib.o \
idpf_main.o \
idpf_txrx.o \
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 1e812c3f62f9..f4c0eaf9bde3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -12,12 +12,16 @@ struct idpf_vport_max_q;
#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
+#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_idpf.h>
+
#include "virtchnl2.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
@@ -194,7 +198,8 @@ struct idpf_vport_max_q {
* @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
- void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+ void (*ctlq_reg_init)(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq);
int (*intr_reg_init)(struct idpf_vport *vport);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
@@ -203,12 +208,25 @@ struct idpf_reg_ops {
void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
+#define IDPF_MMIO_REG_NUM_STATIC 2
+#define IDPF_PF_MBX_REGION_SZ 4096
+#define IDPF_PF_RSTAT_REGION_SZ 2048
+#define IDPF_VF_MBX_REGION_SZ 10240
+#define IDPF_VF_RSTAT_REGION_SZ 2048
+
/**
* struct idpf_dev_ops - Device specific operations
* @reg_ops: Register operations
+ * @idc_init: IDC initialization
+ * @static_reg_info: array of mailbox and rstat register info
*/
struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
+
+ int (*idc_init)(struct idpf_adapter *adapter);
+
+ /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
+ struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};
/**
@@ -251,6 +269,12 @@ struct idpf_port_stats {
struct virtchnl2_vport_stats vport_stats;
};
+struct idpf_fsteer_fltr {
+ struct list_head list;
+ u32 loc;
+ u32 q_index;
+};
+
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @num_txq: Number of allocated TX queues
@@ -275,6 +299,7 @@ struct idpf_port_stats {
* group will yield total number of RX queues.
* @rxq_model: Splitq queue or single queue queuing model
* @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
* @netdev: Associated net_device. Each vport should have one and only one
* associated netdev.
@@ -320,6 +345,8 @@ struct idpf_vport {
u32 rxq_model;
struct libeth_rx_pt *rx_ptype_lkup;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+
struct idpf_adapter *adapter;
struct net_device *netdev;
DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
@@ -379,9 +406,27 @@ struct idpf_rss_data {
};
/**
+ * struct idpf_q_coalesce - User defined coalescing configuration values for
+ * a single queue.
+ * @tx_intr_mode: Dynamic TX ITR or not
+ * @rx_intr_mode: Dynamic RX ITR or not
+ * @tx_coalesce_usecs: TX interrupt throttling rate
+ * @rx_coalesce_usecs: RX interrupt throttling rate
+ *
+ * Used to restore user coalescing configuration after a reset.
+ */
+struct idpf_q_coalesce {
+ u32 tx_intr_mode;
+ u32 rx_intr_mode;
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+};
+
+/**
* struct idpf_vport_user_config_data - User defined configuration values for
* each vport.
* @rss_data: See struct idpf_rss_data
+ * @q_coalesce: Array of per queue coalescing data
* @num_req_tx_qs: Number of user requested TX queues through ethtool
* @num_req_rx_qs: Number of user requested RX queues through ethtool
* @num_req_txq_desc: Number of user requested TX queue descriptors through
@@ -390,17 +435,22 @@ struct idpf_rss_data {
* ethtool
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
+ * @num_fsteer_fltrs: number of flow steering filters
+ * @flow_steer_list: list of flow steering filters
*
* Used to restore configuration after a reset as the vport will get wiped.
*/
struct idpf_vport_user_config_data {
struct idpf_rss_data rss_data;
+ struct idpf_q_coalesce *q_coalesce;
u16 num_req_tx_qs;
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
+ u32 num_fsteer_fltrs;
+ struct list_head flow_steer_list;
};
/**
@@ -507,10 +557,11 @@ struct idpf_vc_xn_manager;
* @flags: See enum idpf_flags
* @reset_reg: See struct idpf_reset_reg
* @hw: Device access data
- * @num_req_msix: Requested number of MSIX vectors
* @num_avail_msix: Available number of MSIX vectors
* @num_msix_entries: Number of entries in MSIX table
* @msix_entries: MSIX table
+ * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
+ * @rdma_msix_entries: RDMA MSIX table
* @req_vec_chunks: Requested vector chunk data
* @mb_vector: Mailbox vector data
* @vector_stack: Stack to store the msix vector indexes
@@ -539,6 +590,7 @@ struct idpf_vc_xn_manager;
* @caps: Negotiated capabilities with device
* @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
+ * @cdev_info: IDC core device info pointer
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
* @crc_enable: Enable CRC insertion offload
@@ -561,10 +613,11 @@ struct idpf_adapter {
DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
struct idpf_reset_reg reset_reg;
struct idpf_hw hw;
- u16 num_req_msix;
u16 num_avail_msix;
u16 num_msix_entries;
struct msix_entry *msix_entries;
+ u16 num_rdma_msix_entries;
+ struct msix_entry *rdma_msix_entries;
struct virtchnl2_alloc_vectors *req_vec_chunks;
struct idpf_q_vector mb_vector;
struct idpf_vector_lifo vector_stack;
@@ -597,6 +650,7 @@ struct idpf_adapter {
struct idpf_vc_xn_manager *vcxn_mngr;
struct idpf_dev_ops dev_ops;
+ struct iidc_rdma_core_dev_info *cdev_info;
int num_vfs;
bool crc_enable;
bool req_tx_splitq;
@@ -630,17 +684,28 @@ static inline int idpf_is_queue_model_split(u16 q_model)
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
enum idpf_cap_field field, u64 flag);
+/**
+ * idpf_is_rdma_cap_ena - Determine if RDMA is supported
+ * @adapter: private data struct
+ *
+ * Return: true if RDMA capability is enabled, false otherwise
+ */
+static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
+{
+ return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
+}
+
#define IDPF_CAP_RSS (\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_UDP |\
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_UDP |\
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV6_OTHER)
+ VIRTCHNL2_FLOW_IPV4_TCP |\
+ VIRTCHNL2_FLOW_IPV4_TCP |\
+ VIRTCHNL2_FLOW_IPV4_UDP |\
+ VIRTCHNL2_FLOW_IPV4_SCTP |\
+ VIRTCHNL2_FLOW_IPV4_OTHER |\
+ VIRTCHNL2_FLOW_IPV6_TCP |\
+ VIRTCHNL2_FLOW_IPV6_TCP |\
+ VIRTCHNL2_FLOW_IPV6_UDP |\
+ VIRTCHNL2_FLOW_IPV6_SCTP |\
+ VIRTCHNL2_FLOW_IPV6_OTHER)
#define IDPF_CAP_RSC (\
VIRTCHNL2_CAP_RSC_IPV4_TCP |\
@@ -683,6 +748,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
}
/**
+ * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
+ * @adapter: private data struct
+ *
+ * Return: number of vectors reserved for RDMA
+ */
+static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
+}
+
+/**
* idpf_get_default_vports - Get default number of vports
* @adapter: private data struct
*/
@@ -721,6 +797,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
}
/**
+ * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 mailbox register address based on register offset.
+ */
+static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ return adapter->hw.mbx.vaddr + reg_offset;
+}
+
+/**
+ * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 rstat register address based on register offset.
+ */
+static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ reg_offset -= adapter->dev_ops.static_reg_info[1].start;
+
+ return adapter->hw.rstat.vaddr + reg_offset;
+}
+
+/**
* idpf_get_reg_addr - Get BAR0 register address
* @adapter: private data struct
* @reg_offset: register offset value
@@ -730,7 +834,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
resource_size_t reg_offset)
{
- return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+ struct idpf_hw *hw = &adapter->hw;
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ struct idpf_mmio_reg *region = &hw->lan_regs[i];
+
+ if (reg_offset >= region->addr_start &&
+ reg_offset < (region->addr_start + region->addr_len)) {
+ /* Convert the offset so that it is relative to the
+ * start of the region. Then add the base address of
+ * the region to get the final address.
+ */
+ reg_offset -= region->addr_start;
+
+ return region->vaddr + reg_offset;
+ }
+ }
+
+ /* It's impossible to hit this case with offsets from the CP. But if we
+ * do for any other reason, the kernel will panic on that register
+ * access. Might as well do it here to make it clear what's happening.
+ */
+ BUG();
+
+ return NULL;
}
/**
@@ -744,7 +871,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
if (!adapter->hw.arq)
return true;
- return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+ return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
adapter->hw.arq->reg.len_mask);
}
@@ -853,5 +980,16 @@ int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
-
+int idpf_idc_init(struct idpf_adapter *adapter);
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+ enum iidc_function_type ftype);
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+ enum iidc_rdma_event_type event_type);
+
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index b28991dd1870..67894eda2d29 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
{
/* Update tail to post pre-allocated buffers for rx queues */
if (is_rxq)
- wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+ idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
/* For non-Mailbox control queues only TAIL need to be set */
if (cq->q_id != -1)
return;
/* Clear Head for both send or receive */
- wr32(hw, cq->reg.head, 0);
+ idpf_mbx_wr32(hw, cq->reg.head, 0);
/* set starting point */
- wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+ idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
/**
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
*/
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
/* Set ring_size to 0 to indicate uninitialized queue */
cq->ring_size = 0;
- mutex_unlock(&cq->cq_lock);
- mutex_destroy(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
}
/**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
idpf_ctlq_init_regs(hw, cq, is_rxq);
- mutex_init(&cq->cq_lock);
+ spin_lock_init(&cq->cq_lock);
list_add(&cq->cq_list, &hw->cq_list_head);
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
int err = 0;
int i;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -329,10 +328,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
*/
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_use);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);
err_unlock:
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
return err;
}
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (*clean_count > cq->ring_size)
return -EBADR;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* Return number of descriptors actually cleaned */
*clean_count = i;
@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
if (*buff_count > 0)
buffs_avail = true;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
if (tbp >= cq->ring_size)
tbp = 0;
@@ -521,10 +520,10 @@ post_buffs_out:
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_post);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
}
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
u16 i;
/* take the lock before we start messing with the ring */
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
*num_q_msg = i;
if (*num_q_msg == 0)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
index c1aba09e9856..de4ece40c2ff 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
@@ -94,12 +94,26 @@ struct idpf_mbxq_desc {
u32 pf_vf_id; /* used by CP when sending to PF */
};
+/* Max number of MMIO regions not including the mailbox and rstat regions in
+ * the fallback case when the whole bar is mapped.
+ */
+#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3
+
+struct idpf_mmio_reg {
+ void __iomem *vaddr;
+ resource_size_t addr_start;
+ resource_size_t addr_len;
+};
+
/* Define the driver hardware struct to replace other control structs as needed
* Align to ctlq_hw_info
*/
struct idpf_hw {
- void __iomem *hw_addr;
- resource_size_t hw_addr_len;
+ struct idpf_mmio_reg mbx;
+ struct idpf_mmio_reg rstat;
+ /* Array of remaining LAN BAR regions */
+ int num_lan_regs;
+ struct idpf_mmio_reg *lan_regs;
struct idpf_adapter *back;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 9642494a67d8..3414c5f9a831 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
enum idpf_ctlq_type cq_type;
int q_id;
- struct mutex cq_lock; /* control queue lock */
+ spinlock_t cq_lock; /* control queue lock */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3fae81f1f988..bfa60f7d43de 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -10,10 +10,13 @@
/**
* idpf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -22,22 +25,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ATQH;
- ccq->reg.tail = PF_FW_ATQT;
- ccq->reg.len = PF_FW_ATQLEN;
- ccq->reg.bah = PF_FW_ATQBAH;
- ccq->reg.bal = PF_FW_ATQBAL;
+ ccq->reg.head = PF_FW_ATQH - mbx_start;
+ ccq->reg.tail = PF_FW_ATQT - mbx_start;
+ ccq->reg.len = PF_FW_ATQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ATQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ATQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ARQH;
- ccq->reg.tail = PF_FW_ARQT;
- ccq->reg.len = PF_FW_ARQLEN;
- ccq->reg.bah = PF_FW_ARQBAH;
- ccq->reg.bal = PF_FW_ARQBAL;
+ ccq->reg.head = PF_FW_ARQH - mbx_start;
+ ccq->reg.tail = PF_FW_ARQT - mbx_start;
+ ccq->reg.len = PF_FW_ARQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ARQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ARQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
@@ -130,7 +133,7 @@ free_reg_vals:
*/
static void idpf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT);
adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
}
@@ -144,9 +147,9 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
{
u32 reset_reg;
- reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
writel(reset_reg | PFGEN_CTRL_PFSWR,
- idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
}
/**
@@ -162,6 +165,17 @@ static void idpf_ptp_reg_init(const struct idpf_adapter *adapter)
}
/**
+ * idpf_idc_register - register for IDC callbacks
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_register(struct idpf_adapter *adapter)
+{
+ return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_PF);
+}
+
+/**
* idpf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -182,4 +196,11 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
void idpf_dev_ops_init(struct idpf_adapter *adapter)
{
idpf_reg_ops_init(adapter);
+
+ adapter->dev_ops.idc_init = idpf_idc_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ PF_FW_BASE, IDPF_PF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ);
}
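[Editorial note, not part of the patch] The two static regions registered here back the helpers added to idpf.h: idpf_ctlq_reg_init() above rewrites the mailbox register offsets relative to static_reg_info[0].start, and idpf_get_mbx_reg_addr()/idpf_get_rstat_reg_addr() add the matching vaddr back in. A hypothetical sketch of the mapping step that would populate hw->mbx and hw->rstat (the actual mapping code lives elsewhere in the driver and may differ):

/* Hypothetical sketch, not the driver's real mapping code. */
static int example_map_static_regions(struct idpf_adapter *adapter)
{
	struct resource *mbx = &adapter->dev_ops.static_reg_info[0];
	struct resource *rstat = &adapter->dev_ops.static_reg_info[1];
	resource_size_t bar0 = pci_resource_start(adapter->pdev, 0);
	struct idpf_hw *hw = &adapter->hw;

	hw->mbx.addr_start = mbx->start;
	hw->mbx.addr_len = resource_size(mbx);
	hw->mbx.vaddr = ioremap(bar0 + mbx->start, resource_size(mbx));
	if (!hw->mbx.vaddr)
		return -ENOMEM;

	hw->rstat.addr_start = rstat->start;
	hw->rstat.addr_len = resource_size(rstat);
	hw->rstat.vaddr = ioremap(bar0 + rstat->start, resource_size(rstat));
	if (!hw->rstat.vaddr) {
		iounmap(hw->mbx.vaddr);
		return -ENOMEM;
	}

	return 0;
}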
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 9bdb309b668e..0eb812ac19c2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -13,26 +14,312 @@
* Returns Success if the command is supported.
*/
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+ u32 *rule_locs)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
+ unsigned int cnt = 0;
+ int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vport->num_rxq;
- idpf_vport_ctrl_unlock(netdev);
-
- return 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ cmd->data = idpf_fsteer_max_rules(vport);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = -EINVAL;
+ list_for_each_entry(f, &user_config->flow_steer_list, list)
+ if (f->loc == cmd->fs.location) {
+ cmd->fs.ring_cookie = f->q_index;
+ err = 0;
+ break;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = idpf_fsteer_max_rules(vport);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (cnt == cmd->rule_cnt) {
+ err = -EMSGSIZE;
+ break;
+ }
+ rule_locs[cnt] = f->loc;
+ cnt++;
+ }
+ if (!err)
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ break;
default:
break;
}
idpf_vport_ctrl_unlock(netdev);
- return -EOPNOTSUPP;
+ return err;
+}
+
+static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct iphdr *iph;
+
+ hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
+ iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
+ iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
+}
+
+static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct udphdr *udph, *udpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);
+
+ udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
+ udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ udph->source = fsp->h_u.udp_ip4_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip4_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip4_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
+ } else {
+ udph->source = fsp->h_u.udp_ip6_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip6_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip6_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
+ }
+}
+
+static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct tcphdr *tcph, *tcpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);
+
+ tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
+ tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
+ } else {
+ tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
+ }
+}
+
+/**
+ * idpf_add_flow_steer - add a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to add Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_add_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_rule_action_set *acts;
+ struct virtchnl2_flow_rule_info *info;
+ struct virtchnl2_proto_hdrs *hdrs;
+ struct idpf_vport *vport;
+ u32 flow_type, q_index;
+ u16 num_rxq;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+ num_rxq = user_config->num_req_rx_qs;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (flow_type != fsp->flow_type)
+ return -EINVAL;
+
+ if (!idpf_sideband_action_ena(vport, fsp) ||
+ !idpf_sideband_flow_type_ena(vport, flow_type))
+ return -EOPNOTSUPP;
+
+ if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
+ return -ENOSPC;
+
+ q_index = fsp->ring_cookie;
+ if (q_index >= num_rxq)
+ return -EINVAL;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ hdrs = &info->rule_cfg.proto_hdrs;
+ hdrs->tunnel_level = 0;
+ hdrs->count = cpu_to_le32(2);
+
+ acts = &info->rule_cfg.action_set;
+ acts->count = cpu_to_le32(1);
+ acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
+ acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_udp(hdrs, fsp, true);
+ break;
+ case TCP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_tcp(hdrs, fsp, true);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_ADD_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->loc >= fltr->loc)
+ break;
+ parent = f;
+ }
+
+ parent ? list_add(&fltr->list, &parent->list) :
+ list_add(&fltr->list, &user_config->flow_steer_list);
+
+ user_config->num_fsteer_fltrs++;
+
+out:
+ kfree(rule);
+ return err;
+}
+
+/**
+ * idpf_del_flow_steer - delete a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to add Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_del_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_flow_rule_info *info;
+ struct idpf_fsteer_fltr *f, *iter;
+ struct idpf_vport *vport;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+
+ if (!idpf_sideband_action_ena(vport, fsp))
+ return -EOPNOTSUPP;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_DEL_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ list_for_each_entry_safe(f, iter,
+ &user_config->flow_steer_list, list) {
+ if (f->loc == fsp->location) {
+ list_del(&f->list);
+ kfree(f);
+ user_config->num_fsteer_fltrs--;
+ goto out;
+ }
+ }
+ err = -EINVAL;
+
+out:
+ kfree(rule);
+ return err;
+}
+
+static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ idpf_vport_ctrl_lock(netdev);
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = idpf_add_flow_steer(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = idpf_del_flow_steer(netdev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+ return ret;
}
/**
@@ -47,7 +334,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -66,7 +353,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -1090,12 +1377,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
+ * @q_coal: per queue coalesce settings
* @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_coalesce *q_coal,
struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
@@ -1139,20 +1428,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
if (is_rxq) {
qv->rx_itr_value = coalesce_usecs;
+ q_coal->rx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
- false);
+ q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
}
} else {
qv->tx_itr_value = coalesce_usecs;
+ q_coal->tx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
+ q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
}
}
@@ -1165,6 +1459,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
/**
* idpf_set_q_coalesce - set ITR values for specific queue
* @vport: vport associated to the queue that need updating
+ * @q_coal: per queue coalesce settings
* @ec: coalesce settings to program the device with
* @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
* @is_rxq: is queue type rx
@@ -1172,6 +1467,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
* Return 0 on success, and negative on failure
*/
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ struct idpf_q_coalesce *q_coal,
const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
@@ -1180,7 +1476,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport,
qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
idpf_find_txq_vec(vport, q_num);
- if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
return -EINVAL;
return 0;
@@ -1201,9 +1497,13 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int i, err = 0;
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -1211,13 +1511,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, false);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
for (i = 0; i < vport->num_rxq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, true);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
goto unlock_mutex;
}
@@ -1239,20 +1541,25 @@ unlock_mutex:
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ q_coal = &user_config->q_coalesce[q_num];
- err = idpf_set_q_coalesce(vport, ec, q_num, false);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
if (err) {
idpf_vport_ctrl_unlock(netdev);
return err;
}
- err = idpf_set_q_coalesce(vport, ec, q_num, true);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
idpf_vport_ctrl_unlock(netdev);
@@ -1394,6 +1701,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_sset_count = idpf_get_sset_count,
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
+ .set_rxnfc = idpf_set_rxnfc,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,
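[Editorial note, not part of the patch] Two user-visible effects of the ethtool changes above, for orientation. With .set_rxnfc wired up, the flow-steering path is reachable through standard n-tuple commands; idpf_add_flow_steer() currently accepts only tcp4 and udp4 flow types. A session might look like "ethtool -N eth0 flow-type tcp4 dst-port 5201 action 2 loc 1" to steer matching packets to RX queue 2, "ethtool -n eth0" to list rules, and "ethtool -N eth0 delete 1" to remove the rule (device name and values are examples only). Separately, the idpf_q_coalesce plumbing mirrors ITR settings made via "ethtool -C" or "ethtool --per-queue ... --coalesce ..." into per-queue user config so they can be reapplied after a reset, per the struct comment added to idpf.h.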
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
new file mode 100644
index 000000000000..4d2905103215
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <linux/export.h>
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+static DEFINE_IDA(idpf_idc_ida);
+
+#define IDPF_IDC_MAX_ADEV_NAME_LEN 15
+
+/**
+ * idpf_idc_init - Called to initialize IDC
+ * @adapter: driver private data structure
+ *
+ * Return: 0 on success or cap not enabled, error code on failure.
+ */
+int idpf_idc_init(struct idpf_adapter *adapter)
+{
+ int err;
+
+ if (!idpf_is_rdma_cap_ena(adapter) ||
+ !adapter->dev_ops.idc_init)
+ return 0;
+
+ err = adapter->dev_ops.idc_init(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to initialize idc: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * idpf_vport_adev_release - function to be mapped to aux dev's release op
+ * @dev: pointer to device to free
+ */
+static void idpf_vport_adev_release(struct device *dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_rdma_vport_auxiliary_dev, adev.dev);
+ kfree(iadev);
+ iadev = NULL;
+}
+
+/**
+ * idpf_plug_vport_aux_dev - allocate and register a vport Auxiliary device
+ * @cdev_info: IDC core device info pointer
+ * @vdev_info: IDC vport device info pointer
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_vport_dev_info *vdev_info)
+{
+ struct iidc_rdma_vport_auxiliary_dev *iadev;
+ char name[IDPF_IDC_MAX_ADEV_NAME_LEN];
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ vdev_info->adev = &iadev->adev;
+ iadev->vdev_info = vdev_info;
+
+ ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("failed to allocate unique device ID for Auxiliary driver\n");
+ goto err_ida_alloc;
+ }
+ adev->id = ret;
+ adev->dev.release = idpf_vport_adev_release;
+ adev->dev.parent = &cdev_info->pdev->dev;
+ sprintf(name, "%04x.rdma.vdev", cdev_info->pdev->vendor);
+ adev->name = name;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_aux_dev_init;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto err_aux_dev_add;
+
+ return 0;
+
+err_aux_dev_add:
+ auxiliary_device_uninit(adev);
+err_aux_dev_init:
+ ida_free(&idpf_idc_ida, adev->id);
+err_ida_alloc:
+ vdev_info->adev = NULL;
+ kfree(iadev);
+
+ return ret;
+}
+
+/**
+ * idpf_idc_init_aux_vport_dev - initialize vport Auxiliary Device(s)
+ * @vport: virtual port data struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct virtchnl2_create_vport *vport_msg;
+ int err;
+
+ vport_msg = (struct virtchnl2_create_vport *)
+ adapter->vport_params_recvd[vport->idx];
+
+ if (!(le16_to_cpu(vport_msg->vport_flags) & VIRTCHNL2_VPORT_ENABLE_RDMA))
+ return 0;
+
+ vport->vdev_info = kzalloc(sizeof(*vdev_info), GFP_KERNEL);
+ if (!vport->vdev_info)
+ return -ENOMEM;
+
+ cdev_info = vport->adapter->cdev_info;
+
+ vdev_info = vport->vdev_info;
+ vdev_info->vport_id = vport->vport_id;
+ vdev_info->netdev = vport->netdev;
+ vdev_info->core_adev = cdev_info->adev;
+
+ err = idpf_plug_vport_aux_dev(cdev_info, vdev_info);
+ if (err) {
+ vport->vdev_info = NULL;
+ kfree(vdev_info);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_idc_vdev_mtu_event - Function to handle IDC vport MTU change events
+ * @vdev_info: IDC vport device info pointer
+ * @event_type: type of event to pass to handler
+ */
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+ enum iidc_rdma_event_type event_type)
+{
+ struct iidc_rdma_vport_auxiliary_drv *iadrv;
+ struct iidc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!vdev_info)
+ /* RDMA is not enabled */
+ return;
+
+ set_bit(event_type, event.type);
+
+ device_lock(&vdev_info->adev->dev);
+ adev = vdev_info->adev;
+ if (!adev || !adev->dev.driver)
+ goto unlock;
+ iadrv = container_of(adev->dev.driver,
+ struct iidc_rdma_vport_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(vdev_info, &event);
+unlock:
+ device_unlock(&vdev_info->adev->dev);
+}
+
+/**
+ * idpf_core_adev_release - function to be mapped to aux dev's release op
+ * @dev: pointer to device to free
+ */
+static void idpf_core_adev_release(struct device *dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, adev.dev);
+ kfree(iadev);
+ iadev = NULL;
+}
+
+/**
+ * idpf_plug_core_aux_dev - allocate and register an Auxiliary device
+ * @cdev_info: IDC core device info pointer
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ char name[IDPF_IDC_MAX_ADEV_NAME_LEN];
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ cdev_info->adev = adev;
+ iadev->cdev_info = cdev_info;
+
+ ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("failed to allocate unique device ID for Auxiliary driver\n");
+ goto err_ida_alloc;
+ }
+ adev->id = ret;
+ adev->dev.release = idpf_core_adev_release;
+ adev->dev.parent = &cdev_info->pdev->dev;
+ sprintf(name, "%04x.rdma.core", cdev_info->pdev->vendor);
+ adev->name = name;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_aux_dev_init;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto err_aux_dev_add;
+
+ return 0;
+
+err_aux_dev_add:
+ auxiliary_device_uninit(adev);
+err_aux_dev_init:
+ ida_free(&idpf_idc_ida, adev->id);
+err_ida_alloc:
+ cdev_info->adev = NULL;
+ kfree(iadev);
+
+ return ret;
+}
+
+/**
+ * idpf_unplug_aux_dev - unregister and free an Auxiliary device
+ * @adev: auxiliary device struct
+ */
+static void idpf_unplug_aux_dev(struct auxiliary_device *adev)
+{
+ if (!adev)
+ return;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+
+ ida_free(&idpf_idc_ida, adev->id);
+}
+
+/**
+ * idpf_idc_issue_reset_event - Function to handle reset IDC event
+ * @cdev_info: IDC core device info pointer
+ */
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ enum iidc_rdma_event_type event_type = IIDC_RDMA_EVENT_WARN_RESET;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!cdev_info)
+ /* RDMA is not enabled */
+ return;
+
+ set_bit(event_type, event.type);
+
+ device_lock(&cdev_info->adev->dev);
+
+ adev = cdev_info->adev;
+ if (!adev || !adev->dev.driver)
+ goto unlock;
+
+ iadrv = container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(cdev_info, &event);
+unlock:
+ device_unlock(&cdev_info->adev->dev);
+}
+
+/**
+ * idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs
+ * @adapter: private data struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_vport_dev_up(struct idpf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_alloc_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (!vport)
+ continue;
+
+ if (!vport->vdev_info)
+ err = idpf_idc_init_aux_vport_dev(vport);
+ else
+ err = idpf_plug_vport_aux_dev(vport->adapter->cdev_info,
+ vport->vdev_info);
+ }
+
+ return err;
+}
+
+/**
+ * idpf_idc_vport_dev_down - called when CORE is leaving vport aux dev support state
+ * @adapter: private data struct
+ */
+static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_alloc_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (!vport)
+ continue;
+
+ idpf_unplug_aux_dev(vport->vdev_info->adev);
+ vport->vdev_info->adev = NULL;
+ }
+}
+
+/**
+ * idpf_idc_vport_dev_ctrl - Called by an Auxiliary Driver
+ * @cdev_info: IDC core device info pointer
+ * @up: RDMA core driver status
+ *
+ * This callback function is accessed by an Auxiliary Driver to indicate
+ * whether the core driver is ready to support vport driver load or if vport
+ * drivers need to be taken down.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
+ if (up)
+ return idpf_idc_vport_dev_up(adapter);
+
+ idpf_idc_vport_dev_down(adapter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_vport_dev_ctrl);
+
+/**
+ * idpf_idc_request_reset - Called by an Auxiliary Driver
+ * @cdev_info: IDC core device info pointer
+ * @reset_type: function, core or other
+ *
+ * This callback function is accessed by an Auxiliary Driver to request a reset
+ * on the Auxiliary Device.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+ enum iidc_rdma_reset_type __always_unused reset_type)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
+ if (!idpf_is_reset_in_prog(adapter)) {
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_request_reset);
+
+/**
+ * idpf_idc_init_msix_data - initialize MSIX data for the cdev_info structure
+ * @adapter: driver private data structure
+ */
+static void
+idpf_idc_init_msix_data(struct idpf_adapter *adapter)
+{
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *privd;
+
+ if (!adapter->rdma_msix_entries)
+ return;
+
+ cdev_info = adapter->cdev_info;
+ privd = cdev_info->iidc_priv;
+
+ privd->msix_entries = adapter->rdma_msix_entries;
+ privd->msix_count = adapter->num_rdma_msix_entries;
+}
+
+/**
+ * idpf_idc_init_aux_core_dev - initialize Auxiliary Device(s)
+ * @adapter: driver private data structure
+ * @ftype: PF or VF
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+ enum iidc_function_type ftype)
+{
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *privd;
+ int err, i;
+
+ adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL);
+ if (!adapter->cdev_info)
+ return -ENOMEM;
+ cdev_info = adapter->cdev_info;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ err = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ cdev_info->iidc_priv = privd;
+ cdev_info->pdev = adapter->pdev;
+ cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2;
+ privd->ftype = ftype;
+
+ privd->mapped_mem_regions =
+ kcalloc(adapter->hw.num_lan_regs,
+ sizeof(struct iidc_rdma_lan_mapped_mem_region),
+ GFP_KERNEL);
+ if (!privd->mapped_mem_regions) {
+ err = -ENOMEM;
+ goto err_plug_aux_dev;
+ }
+
+ privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs);
+ for (i = 0; i < adapter->hw.num_lan_regs; i++) {
+ privd->mapped_mem_regions[i].region_addr =
+ adapter->hw.lan_regs[i].vaddr;
+ privd->mapped_mem_regions[i].size =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_len);
+ privd->mapped_mem_regions[i].start_offset =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_start);
+ }
+
+ idpf_idc_init_msix_data(adapter);
+
+ err = idpf_plug_core_aux_dev(cdev_info);
+ if (err)
+ goto err_free_mem_regions;
+
+ return 0;
+
+err_free_mem_regions:
+ kfree(privd->mapped_mem_regions);
+ privd->mapped_mem_regions = NULL;
+err_plug_aux_dev:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev_info);
+ adapter->cdev_info = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s)
+ * @cdev_info: IDC core device info pointer
+ */
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *privd;
+
+ if (!cdev_info)
+ return;
+
+ idpf_unplug_aux_dev(cdev_info->adev);
+
+ privd = cdev_info->iidc_priv;
+ kfree(privd->mapped_mem_regions);
+ kfree(privd);
+ kfree(cdev_info);
+}
+
+/**
+ * idpf_idc_deinit_vport_aux_device - de-initialize Auxiliary Device(s)
+ * @vdev_info: IDC vport device info pointer
+ */
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info)
+{
+ if (!vdev_info)
+ return;
+
+ idpf_unplug_aux_dev(vdev_info->adev);
+
+ kfree(vdev_info);
+}
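For reference, a consumer of the core auxiliary device registered above binds by name on the auxiliary bus. A minimal sketch follows; the match string "idpf.8086.rdma.core" (module name plus the "%04x.rdma.core" device name with Intel's vendor ID) and the wrapper struct from <linux/net/intel/iidc_rdma.h> are assumptions, and the driver is illustrative only, not part of this patch:

        #include <linux/auxiliary_bus.h>
        #include <linux/module.h>
        #include <linux/net/intel/iidc_rdma.h>

        static int example_rdma_probe(struct auxiliary_device *adev,
                                      const struct auxiliary_device_id *id)
        {
                struct iidc_rdma_core_auxiliary_dev *iadev;

                iadev = container_of(adev, struct iidc_rdma_core_auxiliary_dev, adev);
                /* iadev->cdev_info now exposes the PCI dev, MSI-X data, etc. */
                return 0;
        }

        static void example_rdma_remove(struct auxiliary_device *adev)
        {
        }

        static const struct auxiliary_device_id example_rdma_id_table[] = {
                { .name = "idpf.8086.rdma.core" },      /* assumed match string */
                { }
        };
        MODULE_DEVICE_TABLE(auxiliary, example_rdma_id_table);

        static struct auxiliary_driver example_rdma_driver = {
                .probe = example_rdma_probe,
                .remove = example_rdma_remove,
                .id_table = example_rdma_id_table,
                .name = "example_rdma",
        };
        module_auxiliary_driver(example_rdma_driver);
        MODULE_LICENSE("GPL");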
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 4eb20ec2accb..2c2a3e85d693 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -88,6 +88,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter)
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
}
/**
@@ -299,13 +301,33 @@ rel_lock:
*/
int idpf_intr_req(struct idpf_adapter *adapter)
{
+ u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
u16 default_vports = idpf_get_default_vports(adapter);
int num_q_vecs, total_vecs, num_vec_ids;
- int min_vectors, v_actual, err;
+ int min_vectors, actual_vecs, err;
unsigned int vector;
u16 *vecids;
+ int i;
total_vecs = idpf_get_reserved_vecs(adapter);
+ num_lan_vecs = total_vecs;
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
+ min_rdma_vecs = IDPF_MIN_RDMA_VEC;
+
+ if (!num_rdma_vecs) {
+ /* If idpf_get_reserved_rdma_vecs is 0, vectors are
+ * pulled from the LAN pool.
+ */
+ num_rdma_vecs = min_rdma_vecs;
+ } else if (num_rdma_vecs < min_rdma_vecs) {
+ dev_err(&adapter->pdev->dev,
+ "Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
+ min_rdma_vecs, num_rdma_vecs);
+ return -EINVAL;
+ }
+ }
+
num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
@@ -316,52 +338,76 @@ int idpf_intr_req(struct idpf_adapter *adapter)
return -EAGAIN;
}
- min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
- v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
- total_vecs, PCI_IRQ_MSIX);
- if (v_actual < min_vectors) {
- dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
- v_actual);
- err = -EAGAIN;
+ min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+ min_vectors = min_lan_vecs + min_rdma_vecs;
+ actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+ total_vecs, PCI_IRQ_MSIX);
+ if (actual_vecs < 0) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
+ min_vectors);
+ err = actual_vecs;
goto send_dealloc_vecs;
}
- adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
- GFP_KERNEL);
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ if (actual_vecs < total_vecs) {
+ dev_warn(&adapter->pdev->dev,
+ "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
+ total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
+ num_rdma_vecs = IDPF_MIN_RDMA_VEC;
+ }
+ adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->rdma_msix_entries) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ }
+
+ num_lan_vecs = actual_vecs - num_rdma_vecs;
+ adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!adapter->msix_entries) {
err = -ENOMEM;
- goto free_irq;
+ goto free_rdma_msix;
}
adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
- vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
err = -ENOMEM;
goto free_msix;
}
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
&adapter->req_vec_chunks->vchunks);
- if (num_vec_ids < v_actual) {
+ if (num_vec_ids < actual_vecs) {
err = -EINVAL;
goto free_vecids;
}
- for (vector = 0; vector < v_actual; vector++) {
+ for (vector = 0; vector < num_lan_vecs; vector++) {
adapter->msix_entries[vector].entry = vecids[vector];
adapter->msix_entries[vector].vector =
pci_irq_vector(adapter->pdev, vector);
}
+ for (i = 0; i < num_rdma_vecs; vector++, i++) {
+ adapter->rdma_msix_entries[i].entry = vecids[vector];
+ adapter->rdma_msix_entries[i].vector =
+ pci_irq_vector(adapter->pdev, vector);
+ }
- adapter->num_req_msix = total_vecs;
- adapter->num_msix_entries = v_actual;
/* 'num_avail_msix' is used to distribute excess vectors to the vports
* after considering the minimum vectors required per each default
* vport
*/
- adapter->num_avail_msix = v_actual - min_vectors;
+ adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
+ adapter->num_msix_entries = num_lan_vecs;
+ if (idpf_is_rdma_cap_ena(adapter))
+ adapter->num_rdma_msix_entries = num_rdma_vecs;
/* Fill MSIX vector lifo stack with vector indexes */
err = idpf_init_vector_stack(adapter);
@@ -383,6 +429,9 @@ free_vecids:
free_msix:
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+free_rdma_msix:
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
free_irq:
pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
@@ -755,6 +804,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_FLOW_STEER) &&
+ idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
+ dflt_features |= NETIF_F_NTUPLE;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
csum_offloads |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
@@ -972,6 +1025,8 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
unsigned int i = vport->idx;
+ idpf_idc_deinit_vport_aux_device(vport->vdev_info);
+
idpf_deinit_mac_addr(vport);
idpf_vport_stop(vport);
@@ -1079,8 +1134,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
+ num_max_q = max(max_q->max_txq, max_q->max_rxq);
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
+ struct idpf_q_coalesce *q_coal;
vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
@@ -1089,6 +1146,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return NULL;
}
+ q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
+ if (!q_coal) {
+ kfree(vport_config);
+ kfree(vport);
+
+ return NULL;
+ }
+ for (int i = 0; i < num_max_q; i++) {
+ q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
+ q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
+ }
+ vport_config->user_config.q_coalesce = q_coal;
+
adapter->vport_config[idx] = vport_config;
}
@@ -1098,7 +1170,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
if (!vport->q_vector_idxs)
goto free_vport;
@@ -1481,6 +1552,7 @@ void idpf_init_task(struct work_struct *work)
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+ INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {
@@ -1738,6 +1810,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
+ idpf_idc_issue_reset_event(adapter->cdev_info);
+
idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
@@ -1785,6 +1859,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
+ /* Wait until all vports are created to init RDMA CORE AUX */
+ if (!err)
+ err = idpf_idc_init(adapter);
+
return err;
}
@@ -1868,6 +1946,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_calc_num_q_desc(new_vport);
break;
case IDPF_SR_MTU_CHANGE:
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
+ break;
case IDPF_SR_RSC_CHANGE:
break;
default:
@@ -1912,9 +1993,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (current_state == __IDPF_VPORT_UP)
err = idpf_vport_open(vport);
- kfree(new_vport);
-
- return err;
+ goto free_vport;
err_reset:
idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
@@ -1927,6 +2006,10 @@ err_open:
free_vport:
kfree(new_vport);
+ if (reset_cause == IDPF_SR_MTU_CHANGE)
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
+
return err;
}
@@ -2314,8 +2397,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
- &mem->pa, GFP_KERNEL);
+ /* The control queue resources are freed under a spinlock; contiguous
+ * pages avoid IOMMU remapping and the use of vmap (and vunmap) in the
+ * dma_free_*() path.
+ */
+ mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;
return mem->va;
@@ -2330,8 +2417,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;
- dma_free_coherent(&adapter->pdev->dev, mem->size,
- mem->va, mem->pa);
+ dma_free_attrs(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
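To make the LAN/RDMA MSI-X split in idpf_intr_req() above concrete, here is a stand-alone sketch of the same arithmetic with made-up numbers (16 vectors reserved, 4 of them for RDMA, only 10 granted); the value 2 mirrors IDPF_MIN_RDMA_VEC, everything else is a placeholder:

        #include <stdio.h>

        int main(void)
        {
                unsigned int total_vecs = 16;    /* reserved by the device */
                unsigned int rdma_reserved = 4;  /* idpf_get_reserved_rdma_vecs() */
                unsigned int actual_vecs = 10;   /* granted by pci_alloc_irq_vectors() */
                unsigned int min_rdma_vec = 2;   /* IDPF_MIN_RDMA_VEC */
                unsigned int num_rdma, num_lan;

                num_rdma = rdma_reserved ? rdma_reserved : min_rdma_vec;
                if (actual_vecs < total_vecs)
                        num_rdma = min_rdma_vec;         /* shrink the RDMA share first */
                num_lan = actual_vecs - num_rdma;

                /* LAN uses vector indices [0, num_lan), RDMA takes the tail */
                printf("LAN: 0..%u, RDMA: %u..%u\n", num_lan - 1, num_lan,
                       actual_vecs - 1);
                return 0;
        }

With these inputs the split is LAN 0..7 and RDMA 8..9, matching how the vecids array is partitioned into msix_entries and rdma_msix_entries.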
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index 0efd9c0c7a90..dfe9126f1f4a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -62,6 +62,7 @@ destroy_wqs:
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
+ kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
}
@@ -106,15 +107,37 @@ static void idpf_shutdown(struct pci_dev *pdev)
*/
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
+ resource_size_t res_start, mbx_start, rstat_start;
struct pci_dev *pdev = adapter->pdev;
struct idpf_hw *hw = &adapter->hw;
+ struct device *dev = &pdev->dev;
+ long len;
+
+ res_start = pci_resource_start(pdev, 0);
+
+ /* Map mailbox space for virtchnl communication */
+ mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[0]);
+ hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
+ if (!hw->mbx.vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 mbx region\n");
+
+ return -ENOMEM;
+ }
+ hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
+ hw->mbx.addr_len = len;
- hw->hw_addr = pcim_iomap_table(pdev)[0];
- if (!hw->hw_addr) {
- pci_err(pdev, "failed to allocate PCI iomap table\n");
+ /* Map rstat space for resets */
+ rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[1]);
+ hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
+ if (!hw->rstat.vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 rstat region\n");
return -ENOMEM;
}
+ hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
+ hw->rstat.addr_len = len;
hw->back = adapter;
@@ -161,9 +184,9 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_free;
- err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ err = pcim_request_region(pdev, 0, pci_name(pdev));
if (err) {
- pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+ pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));
goto err_free;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h
index b21a04fccf0f..2aaabdc02dd2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_mem.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h
@@ -12,9 +12,9 @@ struct idpf_dma_mem {
size_t size;
};
-#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg) readl((a)->hw_addr + (reg))
-#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg))
+#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg))
#endif /* _IDPF_MEM_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 4f8725c85332..ee21f2ff0cad 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -42,6 +42,13 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
direct,
mailbox);
+ /* Get the cross timestamp */
+ direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB;
+ ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
/* Set the device clock time */
direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
@@ -171,6 +178,127 @@ static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
return 0;
}
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86)
+/**
+ * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values
+ * directly
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value
+ * @sys_time: 64bit system time value
+ */
+static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi;
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l);
+ sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ *dev_time = (u64)dev_time_hi << 32 | dev_time_lo;
+ *sys_time = (u64)sys_time_hi << 32 | sys_time_lo;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values
+ * through mailbox
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value expressed in nanoseconds
+ * @sys_time: 64bit system time value expressed in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ struct idpf_ptp_dev_timers cross_time;
+ int err;
+
+ err = idpf_ptp_get_cross_time(adapter, &cross_time);
+ if (err)
+ return err;
+
+ *dev_time = cross_time.dev_clk_time_ns;
+ *sys_time = cross_time.sys_time_ns;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * The device and system clock times are read simultaneously.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct idpf_adapter *adapter = ctx;
+ u64 ns_time_dev, ns_time_sys;
+ int err;
+
+ switch (adapter->ptp->get_cross_tstamp_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_DIRECT:
+ idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev,
+ &ns_time_sys);
+ break;
+ case IDPF_PTP_MAILBOX:
+ err = idpf_ptp_get_sync_device_time_mailbox(adapter,
+ &ns_time_dev,
+ &ns_time_sys);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *device = ns_to_ktime(ns_time_dev);
+
+ system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART
+ : CSID_ARM_ARCH_COUNTER;
+ system->cycles = ns_time_sys;
+ system->use_nsecs = true;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_crosststamp - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill with the cross timestamp info
+ *
+ * Capture a cross timestamp between the system time and the device PTP hardware
+ * clock.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ return get_device_system_crosststamp(idpf_ptp_get_sync_device_time,
+ adapter, NULL, cts);
+}
+#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
+
/**
* idpf_ptp_gettimex64 - Get the time of the clock
* @info: the driver's PTP info structure
@@ -661,6 +789,14 @@ static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
info->verify = idpf_ptp_verify_pin;
info->enable = idpf_ptp_gpio_enable;
info->do_aux_work = idpf_ptp_do_aux_work;
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER)
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#elif IS_ENABLED(CONFIG_X86)
+ if (pcie_ptm_enabled(adapter->pdev) &&
+ boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
}
/**
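Once getcrosststamp is populated as above, the cross timestamp becomes reachable from user space through the PTP_SYS_OFFSET_PRECISE ioctl on the PHC character device. A hedged sketch follows; "/dev/ptp0" is a placeholder for whichever node the idpf clock registers as:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/ptp_clock.h>
        #include <unistd.h>

        int main(void)
        {
                struct ptp_sys_offset_precise off = { 0 };
                int fd = open("/dev/ptp0", O_RDWR);

                if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
                        return 1;

                /* Device clock and system realtime captured as one atomic pair */
                printf("device %lld.%09u sys_realtime %lld.%09u\n",
                       (long long)off.device.sec, off.device.nsec,
                       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
                close(fd);
                return 0;
        }

Tools such as phc2sys use the same ioctl to discipline the system clock with lower uncertainty than paired gettime reads.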
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
index a876749d6116..785da03e4cf5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -21,6 +21,8 @@ struct idpf_ptp_cmd {
* @dev_clk_ns_h: high part of the device clock register
* @phy_clk_ns_l: low part of the PHY clock register
* @phy_clk_ns_h: high part of the PHY clock register
+ * @sys_time_ns_l: low part of the system time register
+ * @sys_time_ns_h: high part of the system time register
* @incval_l: low part of the increment value register
* @incval_h: high part of the increment value register
* @shadj_l: low part of the shadow adjust register
@@ -42,6 +44,10 @@ struct idpf_ptp_dev_clk_regs {
void __iomem *phy_clk_ns_l;
void __iomem *phy_clk_ns_h;
+ /* System time */
+ void __iomem *sys_time_ns_l;
+ void __iomem *sys_time_ns_h;
+
/* Main timer adjustments */
void __iomem *incval_l;
void __iomem *incval_h;
@@ -162,6 +168,7 @@ struct idpf_ptp_vport_tx_tstamp_caps {
* @dev_clk_regs: the set of registers to access the device clock
* @caps: PTP capabilities negotiated with the Control Plane
* @get_dev_clk_time_access: access type for getting the device clock time
+ * @get_cross_tstamp_access: access type for the cross timestamping
* @set_dev_clk_time_access: access type for setting the device clock time
* @adj_dev_clk_time_access: access type for the adjusting the device clock
* @tx_tstamp_access: access type for the Tx timestamp value read
@@ -182,6 +189,7 @@ struct idpf_ptp {
struct idpf_ptp_dev_clk_regs dev_clk_regs;
u32 caps;
enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access get_cross_tstamp_access:2;
enum idpf_ptp_access set_dev_clk_time_access:2;
enum idpf_ptp_access adj_dev_clk_time_access:2;
enum idpf_ptp_access tx_tstamp_access:2;
@@ -264,6 +272,8 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time);
int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
@@ -305,6 +315,13 @@ idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
return -EOPNOTSUPP;
}
+static inline int
+idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
u64 time)
{
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 993c354aa27a..555879b1248d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1006,7 +1006,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
break;
skip_data:
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 5cf440e09d0a..66a1b040639d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -383,12 +383,12 @@ err_out:
*/
static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
- if (unlikely(!rx_buf->page))
+ if (unlikely(!rx_buf->netmem))
return;
- page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
+ libeth_rx_recycle_slow(rx_buf->netmem);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
rx_buf->offset = 0;
}
@@ -3240,10 +3240,10 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- u32 hr = rx_buf->page->pp->p.offset;
+ u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->offset + hr, size, rx_buf->truesize);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
+ rx_buf->offset + hr, size, rx_buf->truesize);
}
/**
@@ -3266,16 +3266,22 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
struct libeth_fqe *buf, u32 data_len)
{
u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ struct page *hdr_page, *buf_page;
const void *src;
void *dst;
- if (!libeth_rx_sync_for_cpu(buf, copy))
+ if (unlikely(netmem_is_net_iov(buf->netmem)) ||
+ !libeth_rx_sync_for_cpu(buf, copy))
return 0;
- dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
- src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
- memcpy(dst, src, LARGEST_ALIGN(copy));
+ hdr_page = __netmem_to_page(hdr->netmem);
+ buf_page = __netmem_to_page(buf->netmem);
+ dst = page_address(hdr_page) + hdr->offset +
+ pp_page_to_nmdesc(hdr_page)->pp->p.offset;
+ src = page_address(buf_page) + buf->offset +
+ pp_page_to_nmdesc(buf_page)->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
buf->offset += copy;
return copy;
@@ -3291,11 +3297,12 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
*/
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
- u32 hr = buf->page->pp->p.offset;
+ struct page *buf_page = __netmem_to_page(buf->netmem);
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
void *va;
- va = page_address(buf->page) + buf->offset;
+ va = page_address(buf_page) + buf->offset;
prefetch(va + hr);
skb = napi_build_skb(va, buf->truesize);
@@ -3429,7 +3436,8 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
if (unlikely(!hdr_len && !skb)) {
hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
- pkt_len -= hdr_len;
+ /* If failed, drop both buffers by setting len to 0 */
+ pkt_len -= hdr_len ? : pkt_len;
u64_stats_update_begin(&rxq->stats_sync);
u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
@@ -3446,7 +3454,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
u64_stats_update_end(&rxq->stats_sync);
}
- hdr->page = NULL;
+ hdr->netmem = 0;
payload:
if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
@@ -3462,7 +3470,7 @@ payload:
break;
skip_data:
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
idpf_rx_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
@@ -4349,9 +4357,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
+ struct idpf_q_coalesce *q_coal;
u32 complqs_per_vector, v_idx;
+ u16 idx = vport->idx;
+ user_config = &vport->adapter->vport_config[idx]->user_config;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
@@ -4369,14 +4381,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
+ q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
- q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
- q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
+ q_vector->tx_intr_mode = q_coal->tx_intr_mode;
q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
- q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
- q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
+ q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 36a0f828a6f8..281de655a813 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -57,6 +57,7 @@
/* Default vector sharing */
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
+#define IDPF_MIN_RDMA_VEC 2
#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index aba828abcb17..259d50fded67 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -9,10 +9,13 @@
/**
* idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -21,22 +24,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ATQH;
- ccq->reg.tail = VF_ATQT;
- ccq->reg.len = VF_ATQLEN;
- ccq->reg.bah = VF_ATQBAH;
- ccq->reg.bal = VF_ATQBAL;
+ ccq->reg.head = VF_ATQH - mbx_start;
+ ccq->reg.tail = VF_ATQT - mbx_start;
+ ccq->reg.len = VF_ATQLEN - mbx_start;
+ ccq->reg.bah = VF_ATQBAH - mbx_start;
+ ccq->reg.bal = VF_ATQBAL - mbx_start;
ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = VF_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ARQH;
- ccq->reg.tail = VF_ARQT;
- ccq->reg.len = VF_ARQLEN;
- ccq->reg.bah = VF_ARQBAH;
- ccq->reg.bal = VF_ARQBAL;
+ ccq->reg.head = VF_ARQH - mbx_start;
+ ccq->reg.tail = VF_ARQT - mbx_start;
+ ccq->reg.len = VF_ARQLEN - mbx_start;
+ ccq->reg.bah = VF_ARQBAH - mbx_start;
+ ccq->reg.bal = VF_ARQBAL - mbx_start;
ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = VF_ARQH_ARQH_M;
@@ -129,7 +132,7 @@ free_reg_vals:
*/
static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT);
adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
}
@@ -148,6 +151,17 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
}
/**
+ * idpf_idc_vf_register - register for IDC callbacks
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_vf_register(struct idpf_adapter *adapter)
+{
+ return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_VF);
+}
+
+/**
* idpf_vf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -167,4 +181,11 @@ static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter)
{
idpf_vf_reg_ops_init(adapter);
+
+ adapter->dev_ops.idc_init = idpf_idc_vf_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ VF_BASE, IDPF_VF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ);
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 24febaaa8fbb..a028c69f7fdc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <linux/export.h>
#include <net/libeth/rx.h>
#include "idpf.h"
@@ -849,14 +850,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
caps.rss_caps =
- cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
- VIRTCHNL2_CAP_RSS_IPV4_UDP |
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |
- VIRTCHNL2_CAP_RSS_IPV6_TCP |
- VIRTCHNL2_CAP_RSS_IPV6_UDP |
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |
- VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+ cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
+ VIRTCHNL2_FLOW_IPV4_UDP |
+ VIRTCHNL2_FLOW_IPV4_SCTP |
+ VIRTCHNL2_FLOW_IPV4_OTHER |
+ VIRTCHNL2_FLOW_IPV6_TCP |
+ VIRTCHNL2_FLOW_IPV6_UDP |
+ VIRTCHNL2_FLOW_IPV6_SCTP |
+ VIRTCHNL2_FLOW_IPV6_OTHER);
caps.hsplit_caps =
cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
@@ -868,6 +869,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
caps.other_caps =
cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
+ VIRTCHNL2_CAP_RDMA |
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
@@ -891,6 +894,163 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
}
/**
+ * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
+ * @adapter: Driver specific private struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int num_regions, size;
+ struct idpf_hw *hw;
+ ssize_t reply_sz;
+ int err = 0;
+
+ rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_regions)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = rcvd_regions;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
+ size = struct_size(rcvd_regions, mem_reg, num_regions);
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ hw = &adapter->hw;
+ hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ for (int i = 0; i < num_regions; i++) {
+ hw->lan_regs[i].addr_len =
+ le64_to_cpu(rcvd_regions->mem_reg[i].size);
+ hw->lan_regs[i].addr_start =
+ le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
+ }
+ hw->num_lan_regs = num_regions;
+
+ return err;
+}
+
+/**
+ * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
+ * @adapter: Driver specific private structure
+ *
+ * Called when idpf_send_get_lan_memory_regions is not supported. This will
+ * calculate the offsets and sizes for the regions before, in between, and
+ * after the mailbox and rstat MMIO mappings.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
+ struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
+ struct idpf_hw *hw = &adapter->hw;
+
+ hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
+ hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
+ GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ /* Region preceding mailbox */
+ hw->lan_regs[0].addr_start = 0;
+ hw->lan_regs[0].addr_len = mbx_reg->start;
+ /* Region between mailbox and rstat */
+ hw->lan_regs[1].addr_start = mbx_reg->end + 1;
+ hw->lan_regs[1].addr_len = rstat_reg->start -
+ hw->lan_regs[1].addr_start;
+ /* Region after rstat */
+ hw->lan_regs[2].addr_start = rstat_reg->end + 1;
+ hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
+ hw->lan_regs[2].addr_start;
+
+ return 0;
+}
+
+/**
+ * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct idpf_hw *hw = &adapter->hw;
+ resource_size_t res_start;
+
+ res_start = pci_resource_start(pdev, 0);
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ resource_size_t start;
+ long len;
+
+ len = hw->lan_regs[i].addr_len;
+ if (!len)
+ continue;
+ start = hw->lan_regs[i].addr_start + res_start;
+
+ hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
+ if (!hw->lan_regs[i].vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 region\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
+ * @adapter: adapter info struct
+ * @rule: Flow steering rule to add/delete
+ * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
+ * VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
+ *
+ * Send ADD/DELETE flow steering virtchnl message and receive the result.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode)
+{
+ int rule_count = le32_to_cpu(rule->count);
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
+ if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
+ opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
+ return -EINVAL;
+
+ xn_params.vc_op = opcode;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = false;
+ xn_params.send_buf.iov_base = rule;
+ xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
+ xn_params.recv_buf.iov_base = rule;
+ xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ return reply_sz < 0 ? reply_sz : 0;
+}
+
+/**
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
* @adapter: Driver specific private structure
* @max_q: vport max queue structure
@@ -2801,7 +2961,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
struct idpf_hw *hw = &adapter->hw;
int err;
- adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+ adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
if (err)
@@ -2961,6 +3121,30 @@ restart:
msleep(task_delay);
}
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
+ err = idpf_send_get_lan_memory_regions(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
+ err);
+ return -EINVAL;
+ }
+ } else {
+ /* Fallback to mapping the remaining regions of the entire BAR */
+ err = idpf_calc_remaining_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+ }
+
+ err = idpf_map_lan_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+
pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
num_max_vports = idpf_get_max_vports(adapter);
adapter->max_vports = num_max_vports;
@@ -3070,6 +3254,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
+ idpf_idc_deinit_core_aux_device(adapter->cdev_info);
idpf_intr_rel(adapter);
if (remove_in_prog)
@@ -3493,6 +3678,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
}
/**
+ * idpf_vport_is_cap_ena - Check if vport capability is enabled
+ * @vport: Private data struct
+ * @flag: flag(s) to check
+ *
+ * Return: true if the capability is supported, false otherwise
+ */
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+ return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
+}
+
+/**
+ * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
+ * @vport: Private data struct
+ * @flow_type: flow type to check (from ethtool.h)
+ *
+ * Return: true if sideband filters are allowed for @flow_type, false otherwise
+ */
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ __le64 caps;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ caps = vport_msg->sideband_flow_caps;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
+ case UDP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_sideband_action_ena - Check if steering is enabled for action
+ * @vport: Private data struct
+ * @fsp: flow spec
+ *
+ * Return: true if sideband filters are allowed for @fsp, false otherwise
+ */
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ unsigned int supp_actions;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
+
+ /* Actions Drop/Wake are not supported */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie == RX_CLS_FLOW_WAKE)
+ return false;
+
+ return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
+}
+
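+/**
+ * idpf_fsteer_max_rules - Get the maximum number of flow steering rules
+ * @vport: Private data struct
+ *
+ * Return: maximum flow steering rules reported by the control plane.
+ */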
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ return le32_to_cpu(vport_msg->flow_steer_max_rules);
+}
+
+/**
* idpf_get_vport_id: Get vport id
* @vport: virtual port structure
*
@@ -3728,3 +3986,42 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
return reply_sz < 0 ? reply_sz : 0;
}
+
+/**
+ * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
+ * @cdev_info: IDC core device info pointer
+ * @send_msg: message to send
+ * @msg_size: size of message to send
+ * @recv_msg: message to populate on reception of response
+ * @recv_len: length of message copied into recv_msg or 0 on error
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+ struct idpf_vc_xn_params xn_params = { };
+ ssize_t reply_sz;
+ u16 recv_size;
+
+ if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
+ *recv_len = 0;
+ xn_params.vc_op = VIRTCHNL2_OP_RDMA;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = send_msg;
+ xn_params.send_buf.iov_len = msg_size;
+ xn_params.recv_buf.iov_base = recv_msg;
+ xn_params.recv_buf.iov_len = recv_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ *recv_len = reply_sz;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
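A hedged sketch of how an RDMA auxiliary driver might use the exported relay above; the payload sizes and contents are placeholders, and it assumes the prototype is visible to the consumer (e.g. via the shared iidc_rdma idpf header):

        #include <linux/types.h>
        #include <linux/net/intel/iidc_rdma_idpf.h>    /* assumed location of the prototype */

        static int example_send_rdma_msg(struct iidc_rdma_core_dev_info *cdev_info)
        {
                u8 req[64] = {};                /* opaque RDMA virtchnl payload */
                u8 resp[64];
                u16 resp_len = sizeof(resp);
                int err;

                err = idpf_idc_rdma_vc_send_sync(cdev_info, req, sizeof(req),
                                                 resp, &resp_len);
                if (err)
                        return err;

                /* On success, resp_len holds the number of bytes copied into resp */
                return 0;
        }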
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 77578206bada..86f30f0db07a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -105,6 +105,12 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp);
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
+
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
@@ -151,5 +157,8 @@ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len);
#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
index bdcc54a5fb56..4f1fb0cefe51 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -30,6 +30,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
.send_buf.iov_len = sizeof(send_ptp_caps_msg),
.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
};
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
struct idpf_ptp_secondary_mbx *scnd_mbx;
@@ -71,7 +72,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
access_type = ptp->get_dev_clk_time_access;
if (access_type != IDPF_PTP_DIRECT)
- goto discipline_clock;
+ goto cross_tstamp;
clock_offsets = recv_ptp_caps_msg->clk_offsets;
@@ -90,6 +91,22 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+cross_tstamp:
+ access_type = ptp->get_cross_tstamp_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;
+
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
+ ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
+ ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
discipline_clock:
access_type = ptp->adj_dev_clk_time_access;
if (access_type != IDPF_PTP_DIRECT)
@@ -163,6 +180,42 @@ int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
}
/**
+ * idpf_ptp_get_cross_time - Send virtchnl get cross time message
+ * @adapter: Driver specific private structure
+ * @cross_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get cross time message to get the time of the clock and the
+ * system time.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ struct virtchnl2_ptp_get_cross_time cross_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
+ .send_buf.iov_base = &cross_time_msg,
+ .send_buf.iov_len = sizeof(cross_time_msg),
+ .recv_buf.iov_base = &cross_time_msg,
+ .recv_buf.iov_len = sizeof(cross_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(cross_time_msg))
+ return -EIO;
+
+ cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
+ cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);
+
+ return 0;
+}
+
+/**
* idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
* @adapter: Driver specific private structure
* @time: New time value
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 11b8f6f05799..02ae447cc24a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -62,8 +62,9 @@ enum virtchnl2_op {
VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
* VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
- * Opcodes 529, 530, 531, 532 and 533 are reserved.
*/
+ VIRTCHNL2_OP_RDMA = 529,
+ /* Opcodes 530 through 533 are reserved. */
VIRTCHNL2_OP_LOOPBACK = 534,
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
@@ -78,6 +79,11 @@ enum virtchnl2_op {
VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
+ VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549,
+ /* Opcode 550 is reserved */
+ VIRTCHNL2_OP_ADD_FLOW_RULE = 551,
+ VIRTCHNL2_OP_GET_FLOW_RULE = 552,
+ VIRTCHNL2_OP_DEL_FLOW_RULE = 553,
};
/**
@@ -151,22 +157,22 @@ enum virtchnl2_cap_seg {
VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
};
-/* Receive Side Scaling Flow type capability flags */
-enum virtchnl2_cap_rss {
- VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
- VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
- VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
- VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
- VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
- VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
- VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
- VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
- VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
- VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
- VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
- VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
- VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
- VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+/* Receive Side Scaling and Flow Steering Flow type capability flags */
+enum virtchnl2_flow_types {
+ VIRTCHNL2_FLOW_IPV4_TCP = BIT(0),
+ VIRTCHNL2_FLOW_IPV4_UDP = BIT(1),
+ VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_FLOW_IPV6_TCP = BIT(4),
+ VIRTCHNL2_FLOW_IPV6_UDP = BIT(5),
+ VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_FLOW_IPV4_AH = BIT(8),
+ VIRTCHNL2_FLOW_IPV4_ESP = BIT(9),
+ VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_FLOW_IPV6_AH = BIT(11),
+ VIRTCHNL2_FLOW_IPV6_ESP = BIT(12),
+ VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13),
};
/* Header split capability flags */
@@ -192,8 +198,9 @@ enum virtchnl2_cap_other {
VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
- VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
- /* Queue based scheduling using split queue model */
+ /* Other capability 3 is available
+ * Queue based scheduling using split queue model
+ */
VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
VIRTCHNL2_CAP_CRC = BIT_ULL(5),
VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
@@ -207,16 +214,37 @@ enum virtchnl2_cap_other {
/* EDT: Earliest Departure Time capability used for Timing Wheel */
VIRTCHNL2_CAP_EDT = BIT_ULL(14),
VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
- VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ /* Other capability 16 is available */
VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
/* Other capability 20 is reserved */
+ VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21),
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22),
/* this must be the last capability */
VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
+/**
+ * enum virtchnl2_action_types - Available actions for sideband flow steering
+ * @VIRTCHNL2_ACTION_DROP: Drop the packet
+ * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
+ * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
+ * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
+ * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
+ * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
+ */
+enum virtchnl2_action_types {
+ VIRTCHNL2_ACTION_DROP = BIT(0),
+ VIRTCHNL2_ACTION_PASSTHRU = BIT(1),
+ VIRTCHNL2_ACTION_QUEUE = BIT(2),
+ VIRTCHNL2_ACTION_Q_GROUP = BIT(3),
+ VIRTCHNL2_ACTION_MARK = BIT(4),
+ VIRTCHNL2_ACTION_COUNT = BIT(5),
+};
+
/* underlying device type */
enum virtchl2_device_type {
VIRTCHNL2_MEV_DEVICE = 0,
@@ -458,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* @seg_caps: See enum virtchnl2_cap_seg.
* @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
* @rsc_caps: See enum virtchnl2_cap_rsc.
- * @rss_caps: See enum virtchnl2_cap_rss.
+ * @rss_caps: See enum virtchnl2_flow_types.
* @other_caps: See enum virtchnl2_cap_other.
* @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
* provided by CP.
@@ -483,6 +511,8 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* segment offload.
* @max_hdr_buf_per_lso: Max number of header buffers that can be used for
* an LSO.
+ * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for
+ * the device.
* @pad1: Padding for future extensions.
*
* Dataplane driver sends this message to CP to negotiate capabilities and
@@ -530,7 +560,8 @@ struct virtchnl2_get_capabilities {
__le32 device_type;
u8 min_sso_packet_len;
u8 max_hdr_buf_per_lso;
- u8 pad1[10];
+ __le16 num_rdma_allocated_vectors;
+ u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
@@ -572,9 +603,18 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
* enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
* @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
+ * with explicit Rx queue action
+ * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
+ * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
*/
enum virtchnl2_vport_flags {
- VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2),
+ VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3),
+ VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4),
};
/**
@@ -599,6 +639,14 @@ enum virtchnl2_vport_flags {
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
+ * @inline_flow_caps: Bit mask of supported inline-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_caps: Bit mask of supported sideband-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_actions: Bit mask of supported action types
+ * for sideband flow steering (See enum virtchnl2_action_types)
+ * @flow_steer_max_rules: Max rules allowed for inline and sideband
+ * flow steering combined
* @rss_algorithm: RSS algorithm.
* @rss_key_size: RSS key size.
* @rss_lut_size: RSS LUT size.
@@ -631,7 +679,11 @@ struct virtchnl2_create_vport {
__le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
- u8 pad1[72];
+ u8 pad1[48];
+ __le64 inline_flow_caps;
+ __le64 sideband_flow_caps;
+ __le32 sideband_flow_actions;
+ __le32 flow_steer_max_rules;
__le32 rss_algorithm;
__le16 rss_key_size;
__le16 rss_lut_size;
@@ -1580,4 +1632,182 @@ struct virtchnl2_ptp_adj_dev_clk_time {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+/**
+ * struct virtchnl2_mem_region - MMIO memory region
+ * @start_offset: starting offset of the MMIO memory region
+ * @size: size of the MMIO memory region
+ */
+struct virtchnl2_mem_region {
+ __le64 start_offset;
+ __le64 size;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
+
+/**
+ * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
+ * @num_memory_regions: number of memory regions
+ * @pad: Padding
+ * @mem_reg: List with memory region info
+ *
+ * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
+ */
+struct virtchnl2_get_lan_memory_regions {
+ __le16 num_memory_regions;
+ u8 pad[6];
+ struct virtchnl2_mem_region mem_reg[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
+
+#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4
+#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256
+#define VIRTCHNL2_MAX_NUM_ACTIONS 8
+
+/**
+ * struct virtchnl2_proto_hdr - represent one protocol header
+ * @hdr_type: See enum virtchnl2_proto_hdr_type
+ * @pad: padding
+ * @buffer_spec: binary buffer based on header type.
+ * @buffer_mask: mask applied on buffer_spec.
+ *
+ * Structure to hold protocol headers based on hdr_type
+ */
+struct virtchnl2_proto_hdr {
+ __le32 hdr_type;
+ u8 pad[4];
+ u8 buffer_spec[64];
+ u8 buffer_mask[64];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr);
+
+/**
+ * struct virtchnl2_proto_hdrs - struct to represent match criteria
+ * @tunnel_level: specify where protocol header(s) start from.
+ * Must be 0 when sending a raw packet request.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * @pad: Padding bytes
+ * @count: total number of protocol headers in proto_hdr. 0 for raw packet.
+ * @proto_hdr: Array of protocol headers
+ * @raw: struct holding raw packet buffer when count is 0
+ */
+struct virtchnl2_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ __le32 count;
+ union {
+ struct virtchnl2_proto_hdr
+ proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS];
+ struct {
+ __le16 pkt_len;
+ u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs);
+
+/**
+ * struct virtchnl2_rule_action - struct representing single action for a flow
+ * @action_type: see enum virtchnl2_action_types
+ * @act_conf: union representing action depending on action_type.
+ * @act_conf.q_id: queue id to redirect the packets to.
+ * @act_conf.q_grp_id: queue group id to redirect the packets to.
+ * @act_conf.ctr_id: used for count action. If the input value is 0xFFFFFFFF,
+ * the control plane assigns a new counter and returns the counter ID to the
+ * driver. If the input value is not 0xFFFFFFFF, it must be an existing
+ * counter given to the driver for an earlier flow; this flow will then share
+ * the counter.
+ * @act_conf.mark_id: Value used to mark the packets. Used for mark action.
+ * @act_conf.reserved: Reserved for future use.
+ */
+struct virtchnl2_rule_action {
+ __le32 action_type;
+ union {
+ __le32 q_id;
+ __le32 q_grp_id;
+ __le32 ctr_id;
+ __le32 mark_id;
+ u8 reserved[8];
+ } act_conf;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action);
+
+/**
+ * struct virtchnl2_rule_action_set - struct representing multiple actions
+ * @count: number of valid actions in the action set of a rule
+ * @actions: array of struct virtchnl2_rule_action
+ */
+struct virtchnl2_rule_action_set {
+ /* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */
+ __le32 count;
+ struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set);
+
+/**
+ * struct virtchnl2_flow_rule - represent one flow steering rule
+ * @proto_hdrs: array of protocol header buffers representing match criteria
+ * @action_set: series of actions to be applied for given rule
+ * @priority: rule priority.
+ * @pad: padding for future extensions.
+ */
+struct virtchnl2_flow_rule {
+ struct virtchnl2_proto_hdrs proto_hdrs;
+ struct virtchnl2_rule_action_set action_set;
+ __le32 priority;
+ u8 pad[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule);
+
+enum virtchnl2_flow_rule_status {
+ VIRTCHNL2_FLOW_RULE_SUCCESS = 1,
+ VIRTCHNL2_FLOW_RULE_NORESOURCE = 2,
+ VIRTCHNL2_FLOW_RULE_EXIST = 3,
+ VIRTCHNL2_FLOW_RULE_TIMEOUT = 4,
+ VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED = 5,
+ VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED = 6,
+ VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED = 7,
+ VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID = 8,
+ VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID = 9,
+ VIRTCHNL2_FLOW_RULE_NOT_ADDED = 10,
+};
+
+/**
+ * struct virtchnl2_flow_rule_info - structure representing a single flow rule
+ * @rule_id: rule_id associated with the flow_rule.
+ * @rule_cfg: structure representing rule.
+ * @status: status of rule programming. See enum virtchnl2_flow_rule_status.
+ */
+struct virtchnl2_flow_rule_info {
+ __le32 rule_id;
+ struct virtchnl2_flow_rule rule_cfg;
+ __le32 status;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info);
+
+/**
+ * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule
+ * @vport_id: vport id for which the rule is to be added or deleted.
+ * @count: Indicates number of rules to be added or deleted.
+ * @rule_info: Array of flow rules to be added or deleted.
+ *
+ * For VIRTCHNL2_OP_ADD_FLOW_RULE, rule_info contains a list of rules to be
+ * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached.
+ *
+ * For VIRTCHNL2_OP_DEL_FLOW_RULE, there are two possibilities. The structure
+ * can contain either array of rule_ids or array of match keys to be deleted.
+ * When match keys are used the corresponding rule_ids must be 0xFFFFFFFF.
+ *
+ * The status member of each rule indicates the result. A maximum of 6 rules
+ * can be added or deleted using this method. The driver has to retry in case
+ * of any failure of the ADD or DEL opcode; the CP doesn't retry on failure.
+ */
+struct virtchnl2_flow_rule_add_del {
+ __le32 vport_id;
+ __le32 count;
+ struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del);
+
#endif /* _VIRTCHNL_2_H_ */
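For orientation, a hypothetical sketch (not part of this patch) of filling a one-entry VIRTCHNL2_OP_ADD_FLOW_RULE message with the structures defined above; buffer allocation, the match-key contents and the mailbox transaction are driver specific and elided here:

static void example_fill_one_queue_rule(struct virtchnl2_flow_rule_add_del *msg,
					u32 vport_id, u32 rx_queue)
{
	struct virtchnl2_flow_rule_info *info = &msg->rule_info[0];
	struct virtchnl2_rule_action *act =
		&info->rule_cfg.action_set.actions[0];

	msg->vport_id = cpu_to_le32(vport_id);
	msg->count = cpu_to_le32(1);

	/* 0xFFFFFFFF asks the CP to program the rule without caching it */
	info->rule_id = cpu_to_le32(0xFFFFFFFF);

	/* Match criteria start from the outer layer; proto_hdr entries (or
	 * the raw spec/mask when count is 0) would be filled in here.
	 */
	info->rule_cfg.proto_hdrs.tunnel_level = 0;

	/* Single action: steer matching packets to the given Rx queue */
	info->rule_cfg.action_set.count = cpu_to_le32(1);
	act->action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
	act->act_conf.q_id = cpu_to_le32(rx_queue);
}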
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index f34ead8243e9..c3f4f7cd264e 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -626,7 +626,7 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
unsigned long last_rx_timestamp;
@@ -771,8 +771,11 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp);
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index ca6ccbc13954..92ef33459aec 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2500,9 +2500,11 @@ static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter,
return 0;
}
-static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igb_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igb */
@@ -2563,9 +2565,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = igb_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -2575,9 +2574,11 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
IGB_FLAG_RSS_FIELD_IPV6_UDP)
-static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igb_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -3005,9 +3006,6 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = igb_set_rss_hash_opt(adapter, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = igb_add_ethtool_nfc_entry(adapter, cmd);
break;
@@ -3485,6 +3483,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh = igb_get_rxfh,
.set_rxfh = igb_set_rxfh,
+ .get_rxfh_fields = igb_get_rxfh_fields,
+ .set_rxfh_fields = igb_set_rxfh_fields,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b76a154e635e..a9a7a94ae61e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3062,6 +3062,8 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
.ndo_xsk_wakeup = igb_xsk_wakeup,
+ .ndo_hwtstamp_get = igb_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = igb_ptp_hwtstamp_set,
};
/**
@@ -9317,10 +9319,6 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
- case SIOCGHWTSTAMP:
- return igb_ptp_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return igb_ptp_set_ts_config(netdev, ifr);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 793c96016288..a7876882aeaf 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -73,7 +73,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
static void igb_ptp_sdp_init(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
-static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82576(struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -90,7 +90,7 @@ static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
}
/* SYSTIM read access for the 82580 */
-static u64 igb_ptp_read_82580(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82580(struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -1094,21 +1094,22 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
}
/**
- * igb_ptp_get_ts_config - get hardware time stamping config
+ * igb_ptp_hwtstamp_get - get hardware time stamping config
* @netdev: netdev struct
- * @ifr: interface struct
+ * @config: timestamping configuration structure
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config *config = &adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/**
@@ -1129,7 +1130,7 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* level 2 or 4".
*/
static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -1275,30 +1276,26 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
}
/**
- * igb_ptp_set_ts_config - set hardware time stamping config
+ * igb_ptp_hwtstamp_set - set hardware time stamping config
* @netdev: netdev struct
- * @ifr: interface struct
- *
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
**/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = igb_ptp_set_timestamp_mode(adapter, &config);
+ err = igb_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
index 5cf67ba29269..30ce5fbb5b77 100644
--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -482,7 +482,7 @@ bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
if (!nb_pkts)
return true;
- while (nb_pkts-- > 0) {
+ for (; i < nb_pkts; i++) {
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
@@ -512,7 +512,6 @@ bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
total_bytes += descs[i].len;
- i++;
tx_ring->next_to_use++;
tx_buffer_info->next_to_watch = tx_desc;
if (tx_ring->next_to_use == tx_ring->count)
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 83b97989a6bd..773895c663fd 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -33,6 +33,7 @@ static const struct igbvf_stats igbvf_gstrings_stats[] = {
{ "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
+ { "tx_timeout_count", IGBVF_STAT(tx_timeout_count, zero_base) },
{ "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index ca6e44245a7b..da8e1fd47301 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -154,7 +154,6 @@ struct igbvf_ring {
/* board specific private data structure */
struct igbvf_adapter {
struct timer_list watchdog_timer;
- struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
@@ -162,10 +161,7 @@ struct igbvf_adapter {
const struct igbvf_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
u32 rx_buffer_len;
- u32 polling_interval;
- u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
@@ -183,9 +179,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
- u32 tx_int_delay;
- u32 tx_abs_int_delay;
-
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@@ -193,23 +186,15 @@ struct igbvf_adapter {
/* Tx stats */
u32 tx_timeout_count;
- u32 tx_fifo_head;
- u32 tx_head_addr;
- u32 tx_fifo_size;
- u32 tx_dma_failed;
/* Rx */
struct igbvf_ring *rx_ring;
- u32 rx_int_delay;
- u32 rx_abs_int_delay;
-
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
- u32 rx_dma_failed;
unsigned int rx_ps_hdr_size;
u32 max_frame_size;
@@ -229,26 +214,14 @@ struct igbvf_adapter {
struct e1000_vf_stats stats;
u64 zero_base;
- struct igbvf_ring test_tx_ring;
- struct igbvf_ring test_rx_ring;
- u32 test_icr;
-
u32 msg_enable;
struct msix_entry *msix_entries;
- int int_mode;
u32 eims_enable_mask;
u32 eims_other;
- u32 int_counter0;
- u32 int_counter1;
- u32 eeprom_wol;
u32 wol;
u32 pba;
- bool fc_autoneg;
-
- unsigned long led_status;
-
unsigned int flags;
unsigned long last_reset;
};
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e55dd9345833..61dfcd8cb370 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -855,8 +855,6 @@ static irqreturn_t igbvf_msix_other(int irq, void *data)
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- adapter->int_counter1++;
-
hw->mac.get_link_status = 1;
if (!test_bit(__IGBVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -899,8 +897,6 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
struct net_device *netdev = data;
struct igbvf_adapter *adapter = netdev_priv(netdev);
- adapter->int_counter0++;
-
/* Write the ITR value calculated at the end of the
* previous interrupt.
*/
@@ -1633,10 +1629,6 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->tx_int_delay = 8;
- adapter->tx_abs_int_delay = 32;
- adapter->rx_int_delay = 0;
- adapter->rx_abs_int_delay = 8;
adapter->requested_itr = 3;
adapter->current_itr = IGBVF_START_ITR;
@@ -2712,7 +2704,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
- static int cards_found;
int err;
err = pci_enable_device_mem(pdev);
@@ -2784,8 +2775,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
- adapter->bd_number = cards_found++;
-
netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 859a15e4ccba..266bfcf2a28f 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -43,6 +43,7 @@ void igc_ethtool_set_ops(struct net_device *);
struct igc_fpe_t {
struct ethtool_mmsv mmsv;
u32 tx_min_frag_size;
+ bool tx_enabled;
};
enum igc_mac_filter_type {
@@ -163,6 +164,7 @@ struct igc_ring {
bool launchtime_enable; /* true if LaunchTime is enabled */
ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
ktime_t last_ff_cycle; /* Last cycle with an active first flag */
+ bool preemptible; /* True if preemptible queue, false if express queue */
u32 start_time;
u32 end_time;
@@ -313,7 +315,7 @@ struct igc_adapter {
*/
spinlock_t ptp_tx_lock;
struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS];
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
@@ -395,6 +397,7 @@ extern char igc_driver_name[];
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
+#define IGC_FLAG_TSN_REVERSE_TXQ_PRIO BIT(20)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
@@ -403,10 +406,6 @@ extern char igc_driver_name[];
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
-#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
-#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-
/* RX-desc Write-Back format RSS Type's */
enum igc_rss_type_num {
IGC_RSS_TYPE_NO_HASH = 0,
@@ -485,12 +484,30 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGC_RX_PTHRESH 8
-#define IGC_RX_HTHRESH 8
-#define IGC_TX_PTHRESH 8
-#define IGC_TX_HTHRESH 1
-#define IGC_RX_WTHRESH 4
-#define IGC_TX_WTHRESH 16
+#define IGC_RXDCTL_PTHRESH 8
+#define IGC_RXDCTL_HTHRESH 8
+#define IGC_RXDCTL_WTHRESH 4
+/* Ena specific Rx Queue */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000
+/* Receive Software Flush */
+#define IGC_RXDCTL_SWFLUSH 0x04000000
+
+#define IGC_TXDCTL_PTHRESH_MASK GENMASK(4, 0)
+#define IGC_TXDCTL_HTHRESH_MASK GENMASK(12, 8)
+#define IGC_TXDCTL_WTHRESH_MASK GENMASK(20, 16)
+#define IGC_TXDCTL_QUEUE_ENABLE_MASK GENMASK(25, 25)
+#define IGC_TXDCTL_SWFLUSH_MASK GENMASK(26, 26)
+#define IGC_TXDCTL_PRIORITY_MASK GENMASK(27, 27)
+
+#define IGC_TXDCTL_PTHRESH(x) FIELD_PREP(IGC_TXDCTL_PTHRESH_MASK, (x))
+#define IGC_TXDCTL_HTHRESH(x) FIELD_PREP(IGC_TXDCTL_HTHRESH_MASK, (x))
+#define IGC_TXDCTL_WTHRESH(x) FIELD_PREP(IGC_TXDCTL_WTHRESH_MASK, (x))
+/* Ena specific Tx Queue */
+#define IGC_TXDCTL_QUEUE_ENABLE FIELD_PREP(IGC_TXDCTL_QUEUE_ENABLE_MASK, 1)
+/* Transmit Software Flush */
+#define IGC_TXDCTL_SWFLUSH FIELD_PREP(IGC_TXDCTL_SWFLUSH_MASK, 1)
+#define IGC_TXDCTL_PRIORITY(x) FIELD_PREP(IGC_TXDCTL_PRIORITY_MASK, (x))
+#define IGC_TXDCTL_PRIORITY_HIGH IGC_TXDCTL_PRIORITY(1)
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -614,6 +631,7 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
IGC_FILTER_FLAG_USER_DATA = BIT(4),
IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
+ IGC_FILTER_FLAG_DEFAULT_QUEUE = BIT(6),
};
struct igc_nfc_filter {
@@ -641,10 +659,14 @@ struct igc_nfc_rule {
bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
- * based, 8 ethertype based and 32 Flex filter based rules.
+/* IGC supports a total of 65 NFC rules, listed below in order of priority:
+ * - 16 MAC address based filtering rules (highest priority)
+ * - 8 ethertype based filtering rules
+ * - 32 Flex filter based filtering rules
+ * - 8 VLAN priority based filtering rules
+ * - 1 default queue rule (lowest priority)
*/
-#define IGC_MAX_RXNFC_RULES 64
+#define IGC_MAX_RXNFC_RULES 65
struct igc_flex_filter {
u8 index;
@@ -752,8 +774,11 @@ void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
-int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igc_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int igc_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 6320eabb72fe..eaf17cd031c3 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -86,14 +86,6 @@ union igc_adv_rx_desc {
} wb; /* writeback */
};
-/* Additional Transmit Descriptor Control definitions */
-#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
-#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */
-
-/* Additional Receive Descriptor Control definitions */
-#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
-#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
-
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 7189dfc389ad..498ba1522ca4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -383,11 +383,15 @@
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IGC_MRQC_DEFAULT_QUEUE_MASK GENMASK(5, 3)
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
@@ -588,6 +592,7 @@
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
#define IGC_TXQCTL_STRICT_END 0x00000004
+#define IGC_TXQCTL_PREEMPTIBLE 0x00000008
#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0
#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080
#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 3fc1eded9605..ecb35b693ce5 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -122,9 +122,11 @@ static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGC_STATS_LEN \
(IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
+#define IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO BIT(1)
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
+ "reverse-tsn-txq-prio",
};
#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
@@ -1045,9 +1047,11 @@ static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
return 0;
}
-static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igc */
@@ -1103,8 +1107,6 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
return igc_ethtool_get_nfc_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
- case ETHTOOL_GRXFH:
- return igc_ethtool_get_rss_hash_opts(adapter, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1112,9 +1114,11 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
IGC_FLAG_RSS_FIELD_IPV6_UDP)
-static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igc_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -1279,6 +1283,24 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
rule->flex = true;
else
rule->flex = false;
+
+ /* The wildcard rule is only applied if:
+ * a) None of the other filtering rules match (match_flags is zero)
+ * b) The flow type is ETHER_FLOW only (no additional fields set)
+ * c) Mask for Source MAC address is not specified (all zeros)
+ * d) Mask for Destination MAC address is not specified (all zeros)
+ * e) Mask for L2 EtherType is not specified (zero)
+ *
+ * If all these conditions are met, the rule is treated as a wildcard
+ * rule. Default queue feature will be used, so that all packets that do
+ * not match any other rule will be routed to the default queue.
+ */
+ if (!rule->filter.match_flags &&
+ fsp->flow_type == ETHER_FLOW &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_source) &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) &&
+ !fsp->m_u.ether_spec.h_proto)
+ rule->filter.match_flags = IGC_FILTER_FLAG_DEFAULT_QUEUE;
}
/**
@@ -1425,8 +1447,6 @@ static int igc_ethtool_set_rxnfc(struct net_device *dev,
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return igc_ethtool_set_rss_hash_opt(adapter, cmd);
case ETHTOOL_SRXCLSRLINS:
return igc_ethtool_add_nfc_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -1600,6 +1620,9 @@ static u32 igc_ethtool_get_priv_flags(struct net_device *netdev)
if (adapter->flags & IGC_FLAG_RX_LEGACY)
priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;
+ if (adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ priv_flags |= IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO;
+
return priv_flags;
}
@@ -1608,10 +1631,13 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
- flags &= ~IGC_FLAG_RX_LEGACY;
+ flags &= ~(IGC_FLAG_RX_LEGACY | IGC_FLAG_TSN_REVERSE_TXQ_PRIO);
if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
flags |= IGC_FLAG_RX_LEGACY;
+ if (priv_flags & IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO)
+ flags |= IGC_FLAG_TSN_REVERSE_TXQ_PRIO;
+
if (flags != adapter->flags) {
adapter->flags = flags;
@@ -2144,6 +2170,8 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
.get_rxfh = igc_ethtool_get_rxfh,
.set_rxfh = igc_ethtool_set_rxfh,
+ .get_rxfh_fields = igc_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = igc_ethtool_set_rxfh_fields,
.get_ts_info = igc_ethtool_get_ts_info,
.get_channels = igc_ethtool_get_channels,
.set_channels = igc_ethtool_set_channels,
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index d344e0a1cd5e..7ac6637f8db7 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -127,7 +127,7 @@ s32 igc_setup_link(struct igc_hw *hw)
goto out;
/* If requested flow control is set to default, set flow control
- * to the both 'rx' and 'tx' pause frames.
+ * to both 'rx' and 'tx' pause frames.
*/
if (hw->fc.requested_mode == igc_fc_default)
hw->fc.requested_mode = igc_fc_full;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 686793c539f2..458e5eaa92e5 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -683,9 +683,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
wr32(IGC_SRRCTL(reg_idx), srrctl);
- rxdctl |= IGC_RX_PTHRESH;
- rxdctl |= IGC_RX_HTHRESH << 8;
- rxdctl |= IGC_RX_WTHRESH << 16;
+ rxdctl |= IGC_RXDCTL_PTHRESH;
+ rxdctl |= IGC_RXDCTL_HTHRESH << 8;
+ rxdctl |= IGC_RXDCTL_WTHRESH << 16;
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
@@ -749,11 +749,9 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
wr32(IGC_TDH(reg_idx), 0);
writel(0, ring->tail);
- txdctl |= IGC_TX_PTHRESH;
- txdctl |= IGC_TX_HTHRESH << 8;
- txdctl |= IGC_TX_WTHRESH << 16;
+ txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
+ IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;
- txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
wr32(IGC_TXDCTL(reg_idx), txdctl);
}
@@ -1687,6 +1685,15 @@ done:
first->tx_flags = tx_flags;
first->protocol = protocol;
+ /* For preemptible queue, manually pad the skb so that HW includes
+ * padding bytes in mCRC calculation
+ */
+ if (tx_ring->preemptible && skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out_drop;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -3867,6 +3874,22 @@ static void igc_del_flex_filter(struct igc_adapter *adapter,
wr32(IGC_WUFC, wufc);
}
+static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ mrqc &= ~IGC_MRQC_DEFAULT_QUEUE_MASK;
+ mrqc |= FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue);
+ wr32(IGC_MRQC, mrqc);
+}
+
+static void igc_reset_default_queue_filter(struct igc_adapter *adapter)
+{
+ /* Reset the default queue to its default value which is Queue 0 */
+ igc_set_default_queue_filter(adapter, 0);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
@@ -3905,6 +3928,9 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
return err;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_set_default_queue_filter(adapter, rule->action);
+
return 0;
}
@@ -3932,6 +3958,9 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
rule->filter.dst_addr);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_reset_default_queue_filter(adapter);
}
/**
@@ -6296,24 +6325,6 @@ int igc_close(struct net_device *netdev)
return 0;
}
-/**
- * igc_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return igc_ptp_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return igc_ptp_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
bool enable)
{
@@ -6423,6 +6434,7 @@ static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ ring->preemptible = false;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
@@ -6488,9 +6500,12 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
- /* preemptible isn't supported yet */
- if (qopt->mqprio.preemptible_tcs)
- return -EOPNOTSUPP;
+ if (qopt->mqprio.preemptible_tcs &&
+ !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) {
+ NL_SET_ERR_MSG_MOD(qopt->extack,
+ "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc");
+ return -ENODEV;
+ }
igc_ptp_read(adapter, &now);
@@ -6583,6 +6598,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
ring->max_sdu = 0;
}
+ igc_fpe_save_preempt_queue(adapter, &qopt->mqprio);
+
return 0;
}
@@ -6702,7 +6719,8 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
- caps->broken_mqprio = true;
+ if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO))
+ caps->broken_mqprio = true;
if (hw->mac.type == igc_i225) {
caps->supports_queue_max_sdu = true;
@@ -6728,6 +6746,20 @@ static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
adapter->queue_per_tc[i] = offset[i];
}
+static bool
+igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int num_tc = mqprio->qopt.num_tc;
+ int i;
+
+ for (i = 1; i < num_tc; i++) {
+ if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i])
+ return false;
+ }
+
+ return true;
+}
+
static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -6739,6 +6771,7 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
+ igc_fpe_clear_preempt_queue(adapter);
netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6760,10 +6793,9 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
}
}
- /* Preemption is not supported yet. */
- if (mqprio->preemptible_tcs) {
+ if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) {
NL_SET_ERR_MSG_MOD(mqprio->extack,
- "Preemption is not supported yet");
+ "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)");
return -EOPNOTSUPP;
}
@@ -6786,6 +6818,7 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
adapter->queue_per_tc[i] = i;
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ igc_fpe_save_preempt_queue(adapter, mqprio);
apply:
return igc_tsn_offload_apply(adapter);
@@ -6942,12 +6975,13 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
- .ndo_eth_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
.ndo_xsk_wakeup = igc_xsk_wakeup,
.ndo_get_tstamp = igc_get_tstamp,
+ .ndo_hwtstamp_get = igc_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = igc_ptp_hwtstamp_set,
};
u32 igc_rd32(struct igc_hw *hw, u32 reg)
@@ -7115,6 +7149,10 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
err = pci_save_state(pdev);
if (err)
goto err_ioremap;
@@ -7500,6 +7538,9 @@ static int __igc_resume(struct device *dev, bool rpm)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
if (igc_init_interrupt_scheme(adapter, true)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -7625,6 +7666,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
+
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index f4f5c28615d3..b7b46d863bee 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -626,7 +626,7 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
* Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -853,48 +853,46 @@ void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter)
}
/**
- * igc_ptp_set_ts_config - set hardware time stamping config
+ * igc_ptp_hwtstamp_set - set hardware time stamping config
* @netdev: network interface device structure
- * @ifr: interface request data
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
*
**/
-int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igc_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = igc_ptp_set_timestamp_mode(adapter, &config);
+ err = igc_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
- * igc_ptp_get_ts_config - get hardware time stamping config
+ * igc_ptp_hwtstamp_get - get hardware time stamping config
* @netdev: network interface device structure
- * @ifr: interface request data
+ * @config: timestamping configuration structure
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
-int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igc_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config *config = &adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/* The two conditions below must be met for cross timestamping via
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index f22cc4d4f459..8a110145bfee 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -13,6 +13,13 @@
#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
(MAX_MULTPLIER_TX_MIN_FRAG + 1))
+enum tx_queue {
+ TX_QUEUE_0 = 0,
+ TX_QUEUE_1,
+ TX_QUEUE_2,
+ TX_QUEUE_3,
+};
+
DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
static int igc_fpe_init_smd_frame(struct igc_ring *ring,
@@ -109,6 +116,18 @@ static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
return err;
}
+static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+ adapter->fpe.tx_enabled = tx_enable;
+
+ /* Update config since tx_enabled affects preemptible queue configuration */
+ igc_tsn_offload_apply(adapter);
+}
+
static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
enum ethtool_mpacket type)
{
@@ -130,15 +149,59 @@ static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
}
static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .configure_tx = igc_fpe_configure_tx,
.send_mpacket = igc_fpe_send_mpacket,
};
void igc_fpe_init(struct igc_adapter *adapter)
{
adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ adapter->fpe.tx_enabled = false;
ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
}
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter)
+{
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = false;
+ }
+}
+
+static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter,
+ unsigned long preemptible_tcs)
+{
+ struct net_device *dev = adapter->netdev;
+ u32 i, queue = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(preemptible_tcs & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+ queue |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue;
+}
+
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio)
+{
+ u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter,
+ mqprio->preemptible_tcs);
+
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = !!(preemptible_queue & BIT(i));
+ }
+}
+
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -238,7 +301,7 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
adapter->taprio_offload_enable;
}
-static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
+static void igc_tsn_tx_arb(struct igc_adapter *adapter, bool reverse_prio)
{
struct igc_hw *hw = &adapter->hw;
u32 txarb;
@@ -250,10 +313,17 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
IGC_TXARB_TXQ_PRIO_2_MASK |
IGC_TXARB_TXQ_PRIO_3_MASK);
- txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
- txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
- txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
- txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);
+ if (reverse_prio) {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_3);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_0);
+ } else {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_0);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_3);
+ }
wr32(IGC_TXARB, txarb);
}
@@ -286,7 +356,6 @@ static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
- u16 queue_per_tc[4] = { 3, 2, 1, 0 };
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
@@ -308,9 +377,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
+ int reg_idx = adapter->tx_ring[i]->reg_idx;
+ u32 txdctl;
+
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+
+ txdctl = rd32(IGC_TXDCTL(reg_idx));
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
}
wr32(IGC_QBVCYCLET_S, 0);
@@ -319,7 +395,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
- igc_tsn_tx_arb(adapter, queue_per_tc);
+ igc_tsn_tx_arb(adapter, false);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
@@ -355,7 +431,7 @@ static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
u32 igc_fpe_get_supported_frag_size(u32 frag_size)
{
- const u32 supported_sizes[] = {64, 128, 192, 256};
+ static const u32 supported_sizes[] = { 64, 128, 192, 256 };
/* Find the smallest supported size that is >= frag_size */
for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
@@ -385,15 +461,13 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
- if (adapter->strict_priority_enable) {
- /* Configure queue priorities according to the user provided
- * mapping.
- */
- igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
- }
+ if (adapter->strict_priority_enable ||
+ adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ igc_tsn_tx_arb(adapter, true);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx));
u32 txqctl = 0;
u16 cbs_value;
u32 tqavcc;
@@ -427,6 +501,22 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ if (!adapter->fpe.tx_enabled) {
+ /* fpe inactive: clear both flags */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else if (ring->preemptible) {
+ /* fpe active + preemptible: enable preemptible queue + set low priority */
+ txqctl |= IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else {
+ /* fpe active + express: enable express queue + set high priority */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl |= IGC_TXDCTL_PRIORITY_HIGH;
+ }
+
+ wr32(IGC_TXDCTL(ring->reg_idx), txdctl);
+
/* Skip configuring CBS for Q2 and Q3 */
if (i > 1)
goto skip_cbs;
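As a worked illustration (not part of the patch) of igc_fpe_map_preempt_tc_to_queue(): with the usual 1:1 TC-to-queue layout (tc_to_txq[i].offset = i, count = 1) and preemptible_tcs = BIT(2) | BIT(3), the loop ORs GENMASK(2, 2) and GENMASK(3, 3) into the mask, so igc_fpe_save_preempt_queue() marks Tx rings 2 and 3 preemptible and leaves rings 0 and 1 express. A standalone sketch of the same computation under that assumption:

static u32 example_preempt_tc_to_queue_mask(unsigned long preemptible_tcs,
					    unsigned int num_tc)
{
	u32 queue = 0;
	u32 i;

	for (i = 0; i < num_tc; i++) {
		/* 1:1 layout assumed: offset = i, count = 1 */
		if (preemptible_tcs & BIT(i))
			queue |= GENMASK(i, i);
	}

	return queue;	/* BIT(2) | BIT(3) -> 0x0c */
}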
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index c2a77229207b..a95b893459d7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,6 +4,8 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#include <net/pkt_sched.h>
+
#define IGC_RX_MIN_FRAG_SIZE 60
#define SMD_FRAME_SIZE 60
@@ -15,6 +17,9 @@ enum igc_txd_popts_type {
DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
void igc_fpe_init(struct igc_adapter *adapter);
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter);
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio);
u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
index 76f6571c3c34..478b4f435120 100644
--- a/drivers/net/ethernet/intel/ixgbe/devlink/region.c
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
@@ -74,7 +74,7 @@ static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
* total period of reading whole NVM is longer than the maximum
* period the lock can be taken defined by the IXGBE_NVM_TIMEOUT.
*/
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to acquire NVM semaphore");
@@ -184,7 +184,7 @@ static int ixgbe_devlink_nvm_read(struct devlink *devlink,
return -ERANGE;
}
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
return -EBUSY;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 47311b134a7a..14d275270123 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -429,6 +429,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+#define IXGBE_MAX_TX_QUEUES 128
+#define IXGBE_MAX_TX_DESCRIPTORS 40
+#define IXGBE_MAX_TX_VF_HANGS 4
+
DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
struct ixgbe_ring_feature {
@@ -507,9 +511,10 @@ struct ixgbe_q_vector {
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
int numa_node;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
@@ -752,6 +757,7 @@ struct ixgbe_adapter {
bool link_up;
unsigned long sfp_poll_time;
unsigned long link_check_timeout;
+ u32 link_down_events;
struct timer_list service_timer;
struct work_struct service_task;
@@ -784,7 +790,7 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
@@ -809,6 +815,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
+ u8 tx_hang_count[IXGBE_MAX_TX_QUEUES];
struct kobject *info_kobj;
u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
@@ -1079,8 +1086,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
-int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 444da982593f..406c15f58034 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -45,7 +45,7 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
goto out;
/*
- * if capababilities version is type 1 we can write the
+ * if capabilities version is type 1 we can write the
* timeout of 10ms to 250ms through the GCR register
*/
if (!(gcr & IXGBE_GCR_CAP_VER2)) {
@@ -751,7 +751,7 @@ mac_reset_top:
/*
* Store the original AUTOC value if it has not been
* stored off yet. Otherwise restore the stored original
- * AUTOC value since the reset operation sets back to deaults.
+ * AUTOC value since the reset operation sets back to defaults.
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if (hw->mac.orig_link_settings_stored == false) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 5784d5d1896e..4ff19426ab74 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -244,7 +244,7 @@ int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
/* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on, likewise reset_pipeline requries the lock as
+ * LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
@@ -301,7 +301,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
return ret_val;
}
- /* Cashe bit indicating need for crosstalk fix */
+ /* Cache bit indicating need for crosstalk fix */
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 71ea25de1bac..d74116441d1c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -56,7 +56,7 @@ static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
* Admin Command failed with error Y.
*/
static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
- struct ixgbe_aci_desc *desc,
+ struct libie_aq_desc *desc,
void *buf, u16 buf_size)
{
u16 opcode, buf_tail_size = buf_size % 4;
@@ -64,7 +64,7 @@ static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
u32 hicr, i, buf_tail = 0;
bool valid_buf = false;
- hw->aci.last_status = IXGBE_ACI_RC_OK;
+ hw->aci.last_status = LIBIE_AQ_RC_OK;
/* It's necessary to check if mechanism is enabled */
hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
@@ -73,7 +73,7 @@ static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
return -EIO;
if (hicr & IXGBE_PF_HICR_C) {
- hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ hw->aci.last_status = LIBIE_AQ_RC_EBUSY;
return -EBUSY;
}
@@ -83,9 +83,9 @@ static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
return -EINVAL;
if (buf)
- desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
- if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
+ if (desc->flags & cpu_to_le16(LIBIE_AQ_FLAG_BUF)) {
if ((buf && !buf_size) ||
(!buf && buf_size))
return -EINVAL;
@@ -98,12 +98,12 @@ static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
memcpy(&buf_tail, buf + buf_size - buf_tail_size,
buf_tail_size);
- if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
- desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
+ if (((buf_size + 3) & ~0x3) > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16(buf_size);
- if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
+ if (desc->flags & cpu_to_le16(LIBIE_AQ_FLAG_RD)) {
for (i = 0; i < buf_size / 4; i++)
IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
if (buf_tail_size)
@@ -174,7 +174,7 @@ static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
return -EIO;
if (desc->retval) {
- hw->aci.last_status = (enum ixgbe_aci_err)
+ hw->aci.last_status = (enum libie_aq_err)
le16_to_cpu(desc->retval);
return -EIO;
}
@@ -207,12 +207,12 @@ static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
*
* Return: the exit code of the operation.
*/
-int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
void *buf, u16 buf_size)
{
u16 opcode = le16_to_cpu(desc->opcode);
- struct ixgbe_aci_desc desc_cpy;
- enum ixgbe_aci_err last_status;
+ struct libie_aq_desc desc_cpy;
+ enum libie_aq_err last_status;
u8 idx = 0, *buf_cpy = NULL;
bool is_cmd_for_retry;
unsigned long timeout;
@@ -237,7 +237,7 @@ int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
mutex_unlock(&hw->aci.lock);
if (!is_cmd_for_retry || !err ||
- last_status != IXGBE_ACI_RC_EBUSY)
+ last_status != LIBIE_AQ_RC_EBUSY)
break;
if (buf)
@@ -286,7 +286,7 @@ bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
bool *pending)
{
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
int err;
if (!e || (!e->msg_buf && e->buf_len))
@@ -335,12 +335,12 @@ aci_get_event_exit:
* Helper function to fill the descriptor desc with default values
* and the provided opcode.
*/
-void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+void ixgbe_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* Zero out the desc. */
memset(desc, 0, sizeof(*desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -353,8 +353,8 @@ void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
*/
static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
{
- struct ixgbe_aci_cmd_get_ver *resp;
- struct ixgbe_aci_desc desc;
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
int err;
resp = &desc.params.get_ver;
@@ -393,12 +393,12 @@ static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
*
* Return: the exit code of the operation.
*/
-static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
- enum ixgbe_aci_res_access_type access,
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access,
u8 sdp_number, u32 *timeout)
{
- struct ixgbe_aci_cmd_req_res *cmd_resp;
- struct ixgbe_aci_desc desc;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int err;
cmd_resp = &desc.params.res_owner;
@@ -417,7 +417,7 @@ static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
* with a busy return value and the timeout field indicates the maximum
* time the current owner of the resource has to free it.
*/
- if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ if (!err || hw->aci.last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return err;
@@ -433,11 +433,11 @@ static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
*
* Return: the exit code of the operation.
*/
-static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
- enum ixgbe_aci_res_ids res, u8 sdp_number)
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ u8 sdp_number)
{
- struct ixgbe_aci_cmd_req_res *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
cmd = &desc.params.res_owner;
@@ -465,8 +465,8 @@ static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
*
* Return: the exit code of the operation.
*/
-int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
- enum ixgbe_aci_res_access_type access, u32 timeout)
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access, u32 timeout)
{
#define IXGBE_RES_POLLING_DELAY_MS 10
u32 delay = IXGBE_RES_POLLING_DELAY_MS;
@@ -514,7 +514,7 @@ int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
*
* Release a common resource using ixgbe_aci_release_res.
*/
-void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+void ixgbe_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res)
{
u32 total_delay = 0;
int err;
@@ -547,7 +547,7 @@ void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
*/
static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_caps *caps,
- struct ixgbe_aci_cmd_list_caps_elem *elem,
+ struct libie_aqc_list_caps_elem *elem,
const char *prefix)
{
u32 logical_id = le32_to_cpu(elem->logical_id);
@@ -556,67 +556,67 @@ static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
u16 cap = le16_to_cpu(elem->cap);
switch (cap) {
- case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
break;
- case IXGBE_ACI_CAPS_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
break;
- case IXGBE_ACI_CAPS_VMDQ:
+ case LIBIE_AQC_CAPS_VMDQ:
caps->vmdq = (number == 1);
break;
- case IXGBE_ACI_CAPS_DCB:
+ case LIBIE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
break;
- case IXGBE_ACI_CAPS_RSS:
+ case LIBIE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
break;
- case IXGBE_ACI_CAPS_RXQS:
+ case LIBIE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
break;
- case IXGBE_ACI_CAPS_TXQS:
+ case LIBIE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
break;
- case IXGBE_ACI_CAPS_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
break;
- case IXGBE_ACI_CAPS_NVM_VER:
+ case LIBIE_AQC_CAPS_NVM_VER:
break;
- case IXGBE_ACI_CAPS_PENDING_NVM_VER:
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
caps->nvm_update_pending_nvm = true;
break;
- case IXGBE_ACI_CAPS_PENDING_OROM_VER:
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
caps->nvm_update_pending_orom = true;
break;
- case IXGBE_ACI_CAPS_PENDING_NET_VER:
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
caps->nvm_update_pending_netlist = true;
break;
- case IXGBE_ACI_CAPS_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
caps->nvm_unified_update =
(number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
true : false;
break;
- case IXGBE_ACI_CAPS_MAX_MTU:
+ case LIBIE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
break;
- case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
caps->pcie_reset_avoidance = (number > 0);
break;
- case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
caps->reset_restrict_support = (number == 1);
break;
- case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
- case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
- case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
- case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
{
- u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+ u8 index = cap - LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
caps->ext_topo_dev_img_ver_high[index] = number;
caps->ext_topo_dev_img_ver_low[index] = logical_id;
@@ -637,62 +637,62 @@ static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
}
/**
- * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * ixgbe_parse_valid_functions_cap - Parse LIBIE_AQC_CAPS_VALID_FUNCTIONS caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
- * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ * Parse LIBIE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
*/
static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
struct ixgbe_hw_dev_caps *dev_p,
- struct ixgbe_aci_cmd_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
}
/**
- * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * ixgbe_parse_vf_dev_caps - Parse LIBIE_AQC_CAPS_VF device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
- * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ * Parse LIBIE_AQC_CAPS_VF for device capabilities.
*/
static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_dev_caps *dev_p,
- struct ixgbe_aci_cmd_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
}
/**
- * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * ixgbe_parse_vsi_dev_caps - Parse LIBIE_AQC_CAPS_VSI device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
- * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ * Parse LIBIE_AQC_CAPS_VSI for device capabilities.
*/
static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_dev_caps *dev_p,
- struct ixgbe_aci_cmd_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
}
/**
- * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * ixgbe_parse_fdir_dev_caps - Parse LIBIE_AQC_CAPS_FD device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
- * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ * Parse LIBIE_AQC_CAPS_FD for device capabilities.
*/
static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_dev_caps *dev_p,
- struct ixgbe_aci_cmd_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
}
@@ -715,10 +715,10 @@ static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_dev_caps *dev_p,
void *buf, u32 cap_count)
{
- struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
- cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+ cap_resp = (struct libie_aqc_list_caps_elem *)buf;
memset(dev_p, 0, sizeof(*dev_p));
@@ -729,17 +729,17 @@ static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
"dev caps");
switch (cap) {
- case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
ixgbe_parse_valid_functions_cap(hw, dev_p,
&cap_resp[i]);
break;
- case IXGBE_ACI_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case IXGBE_ACI_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case IXGBE_ACI_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
@@ -750,16 +750,16 @@ static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
}
/**
- * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * ixgbe_parse_vf_func_caps - Parse LIBIE_AQC_CAPS_VF function caps
* @hw: pointer to the HW struct
* @func_p: pointer to function capabilities structure
* @cap: pointer to the capability element to parse
*
- * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ * Extract function capabilities for LIBIE_AQC_CAPS_VF.
*/
static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_func_caps *func_p,
- struct ixgbe_aci_cmd_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
func_p->num_allocd_vfs = le32_to_cpu(cap->number);
func_p->vf_base_id = le32_to_cpu(cap->logical_id);
@@ -786,16 +786,16 @@ static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
}
/**
- * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * ixgbe_parse_vsi_func_caps - Parse LIBIE_AQC_CAPS_VSI function caps
* @hw: pointer to the HW struct
* @func_p: pointer to function capabilities structure
* @cap: pointer to the capability element to parse
*
- * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ * Extract function capabilities for LIBIE_AQC_CAPS_VSI.
*/
static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_func_caps *func_p,
- struct ixgbe_aci_cmd_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
}
@@ -818,10 +818,10 @@ static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
struct ixgbe_hw_func_caps *func_p,
void *buf, u32 cap_count)
{
- struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
- cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+ cap_resp = (struct libie_aqc_list_caps_elem *)buf;
memset(func_p, 0, sizeof(*func_p));
@@ -832,10 +832,10 @@ static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
&cap_resp[i], "func caps");
switch (cap) {
- case IXGBE_ACI_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
- case IXGBE_ACI_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
default:
@@ -869,8 +869,8 @@ static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
u32 *cap_count, enum ixgbe_aci_opc opc)
{
- struct ixgbe_aci_cmd_list_caps *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int err;
cmd = &desc.params.get_cap;
@@ -914,7 +914,7 @@ int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
* possible size that firmware can return.
*/
cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
- sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+ sizeof(struct libie_aqc_list_caps_elem);
err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
&cap_count,
@@ -953,7 +953,7 @@ int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
* possible size that firmware can return.
*/
cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
- sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+ sizeof(struct libie_aqc_list_caps_elem);
err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
&cap_count,
@@ -996,9 +996,9 @@ int ixgbe_get_caps(struct ixgbe_hw *hw)
int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
{
struct ixgbe_aci_cmd_disable_rxen *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.disable_rxen;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
@@ -1024,10 +1024,10 @@ int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
{
struct ixgbe_aci_cmd_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
int err;
- cmd = &desc.params.get_phy;
+ cmd = libie_aq_raw(&desc);
if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
return -EINVAL;
@@ -1091,18 +1091,20 @@ void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
{
- struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
int err;
if (!cfg)
return -EINVAL;
+ cmd = libie_aq_raw(&desc);
/* Ensure that only valid bits of cfg->caps can be turned on. */
cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = hw->bus.func;
- desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+ cmd->lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
if (!err)
@@ -1123,9 +1125,9 @@ int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
{
struct ixgbe_aci_cmd_restart_an *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.restart_an;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
@@ -1151,9 +1153,9 @@ int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
{
struct ixgbe_aci_cmd_get_link_topo *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.get_link_topo;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
@@ -1346,7 +1348,7 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
struct ixgbe_aci_cmd_get_link_status *resp;
struct ixgbe_link_status *li_old, *li;
struct ixgbe_fc_info *hw_fc_info;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
u8 cmd_flags;
int err;
@@ -1360,7 +1362,7 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
- resp = &desc.params.get_link_status;
+ resp = libie_aq_raw(&desc);
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = hw->bus.func;
@@ -1423,9 +1425,9 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
{
struct ixgbe_aci_cmd_set_event_mask *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_event_mask;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
@@ -1496,9 +1498,9 @@ static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
{
struct ixgbe_aci_cmd_set_port_id_led *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_port_id_led;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
@@ -2260,19 +2262,20 @@ int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
struct ixgbe_aci_cmd_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle)
{
- struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_get_link_topo *resp;
+ struct libie_aq_desc desc;
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
return -EOPNOTSUPP;
if (node_handle)
- *node_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *node_handle = le16_to_cpu(resp->addr.handle);
if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ *node_part_number = resp->node_part_num;
return 0;
}
@@ -2286,8 +2289,7 @@ int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
*
* Return: the exit code of the operation.
*/
-int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
- enum ixgbe_aci_res_access_type access)
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw, enum libie_aq_res_access_type access)
{
u32 fla;
@@ -2296,7 +2298,7 @@ int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
return 0;
- return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ return ixgbe_acquire_res(hw, LIBIE_AQC_RES_ID_NVM, access,
IXGBE_NVM_TIMEOUT);
}
@@ -2315,7 +2317,7 @@ void ixgbe_release_nvm(struct ixgbe_hw *hw)
if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
return;
- ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+ ixgbe_release_res(hw, LIBIE_AQC_RES_ID_NVM);
}
/**
@@ -2337,12 +2339,12 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
bool read_shadow_ram)
{
struct ixgbe_aci_cmd_nvm *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
return -EINVAL;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
@@ -2372,7 +2374,7 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
{
struct ixgbe_aci_cmd_nvm *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
__le16 len;
int err;
@@ -2385,7 +2387,7 @@ int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
if (err)
return err;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
@@ -2416,9 +2418,9 @@ int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
bool last_command, u8 command_flags)
{
struct ixgbe_aci_cmd_nvm *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
@@ -2436,7 +2438,7 @@ int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ixgbe_aci_send_cmd(hw, &desc, data, length);
}
@@ -2467,10 +2469,10 @@ int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
u8 *response_flags)
{
struct ixgbe_aci_cmd_nvm *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
s32 err;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc,
ixgbe_aci_opc_nvm_write_activate);
@@ -2498,14 +2500,14 @@ int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
{
struct ixgbe_aci_cmd_nvm_checksum *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
int err;
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err)
return err;
- cmd = &desc.params.nvm_checksum;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
@@ -2541,7 +2543,7 @@ static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
int err;
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err)
return err;
@@ -2552,7 +2554,7 @@ static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
if (err == -EIO &&
- hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ hw->aci.last_status == LIBIE_AQ_RC_EINVAL) {
err = 0;
max_size = offset;
} else if (!err) {
@@ -2805,7 +2807,7 @@ static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
if (!start)
return -EINVAL;
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err)
return err;
@@ -3389,7 +3391,7 @@ int ixgbe_get_flash_data(struct ixgbe_hw *hw)
*/
int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
{
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
@@ -3415,15 +3417,15 @@ int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
u8 *data, u16 length)
{
struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
if (length != 0 && !data)
return -EINVAL;
- cmd = &desc.params.pkg_data;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
- desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (del_pkg_data_flag)
cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;
@@ -3453,17 +3455,17 @@ int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
u8 *comp_response_code)
{
struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
int err;
if (!data || !comp_response || !comp_response_code)
return -EINVAL;
- cmd = &desc.params.pass_comp_tbl;
+ cmd = libie_aq_raw(&desc);
ixgbe_fill_dflt_direct_cmd_desc(&desc,
ixgbe_aci_opc_nvm_pass_component_tbl);
- desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->transfer_flag = transfer_flag;
err = ixgbe_aci_send_cmd(hw, &desc, data, length);
@@ -3617,7 +3619,7 @@ int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
return err;
}
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err)
return err;
@@ -3650,7 +3652,7 @@ int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
return err;
}
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err)
return err;
@@ -3690,7 +3692,7 @@ int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
if (checksum_val) {
u16 tmp_checksum;
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
if (err)
return err;
@@ -3965,6 +3967,10 @@ static const struct ixgbe_mac_operations mac_ops_e610 = {
.prot_autoc_write = prot_autoc_write_generic,
.setup_fc = ixgbe_setup_fc_e610,
.fc_autoneg = ixgbe_fc_autoneg_e610,
+ .enable_mdd = ixgbe_enable_mdd_x550,
+ .disable_mdd = ixgbe_disable_mdd_x550,
+ .restore_mdd_vf = ixgbe_restore_mdd_vf_x550,
+ .handle_mdd = ixgbe_handle_mdd_x550,
};
static const struct ixgbe_phy_operations phy_ops_e610 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
index bb31d65bd1c8..782c489b0fa7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -6,15 +6,15 @@
#include "ixgbe_type.h"
-int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
void *buf, u16 buf_size);
bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
bool *pending);
-void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
-int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
- enum ixgbe_aci_res_access_type access, u32 timeout);
-void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+void ixgbe_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res);
int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
u32 *cap_count, enum ixgbe_aci_opc opc);
int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
@@ -62,7 +62,7 @@ int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
struct ixgbe_aci_cmd_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle);
int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
- enum ixgbe_aci_res_access_type access);
+ enum libie_aq_res_access_type access);
void ixgbe_release_nvm(struct ixgbe_hw *hw);
int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index d8a919ab7027..25c3a09ad7f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1033,6 +1033,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
+static void ixgbe_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ stats->link_down_events = adapter->link_down_events;
+}
+
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
@@ -2745,9 +2753,11 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int ixgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
cmd->data = 0;
/* Report default options for RSS on ixgbe */
@@ -2817,9 +2827,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = ixgbe_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -3071,9 +3078,11 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
-static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int ixgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u32 flags2 = adapter->flags2;
/*
@@ -3196,9 +3205,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = ixgbe_set_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -3719,6 +3725,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_wol = ixgbe_set_wol,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
@@ -3742,6 +3749,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
@@ -3764,6 +3773,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.set_wol = ixgbe_set_wol_e610,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
@@ -3787,6 +3797,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7dcf6ecd157b..011fda9c6193 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -744,7 +744,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
* ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
* @adapter: ixgbe adapter
*
- * Sets up ddp context resouces
+ * Sets up ddp context resources
*
* Returns : 0 indicates success or -EINVAL on failure
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
index 49d3b66add7e..e5479fc07a07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
@@ -593,7 +593,7 @@ static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter,
"Canceling previous pending update",
component, 0, 0);
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to acquire device flash lock");
@@ -686,7 +686,7 @@ int ixgbe_flash_pldm_image(struct devlink *devlink,
if (err)
return err;
- err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to acquire device flash lock");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 336d47ffb95a..170a29d162c6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -891,7 +891,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
IXGBE_ITR_ADAPTIVE_LATENCY;
- /* intialize ITR */
+ /* initialize ITR */
if (txr_count && !rxr_count) {
/* tx only vector */
if (adapter->tx_itr_setting == 1)
@@ -1293,7 +1293,8 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
- type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+ type_tucmd |= IXGBE_TXD_CMD_DEXT |
+ FIELD_PREP(IXGBE_ADVTXD_DTYP_MASK, IXGBE_ADVTXD_DTYP_CTXT);
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cba860f0e1f1..6122a0abb41f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
@@ -1040,6 +1041,48 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
+/**
+ * ixgbe_get_vf_idx - provide VF index number based on queue index
+ * @adapter: pointer to the adapter struct
+ * @queue: Tx queue identifier
+ * @vf: output VF index
+ *
+ * Provide the VF index number associated with the input queue.
+ *
+ * Returns: 0 if the VF index was found, otherwise a negative error number.
+ */
+static int ixgbe_get_vf_idx(struct ixgbe_adapter *adapter, u16 queue, u16 *vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 queue_count;
+ u32 reg;
+
+ if (queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ /* Determine number of queues by checking
+ * number of virtual functions
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
+ case IXGBE_GCR_EXT_VT_MODE_64:
+ queue_count = IXGBE_64VFS_QUEUES;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_32:
+ queue_count = IXGBE_32VFS_QUEUES;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_16:
+ queue_count = IXGBE_16VFS_QUEUES;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *vf = queue / queue_count;
+
+ return 0;
+}
+
static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
@@ -1159,6 +1202,150 @@ void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
}
/**
+ * ixgbe_pf_handle_tx_hang - handle Tx hang on PF
+ * @tx_ring: Tx ring that hung
+ * @next: next_to_clean index in the ring
+ *
+ * Prints a message containing details about the tx hang.
+ */
+static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
+ unsigned int next)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ e_err(drv, "Detected Tx Unit Hang%s\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ ring_is_xdp(tx_ring) ? " (XDP)" : "",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, next,
+ tx_ring->tx_buffer_info[next].time_stamp, jiffies);
+
+ if (!ring_is_xdp(tx_ring))
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+}
+
+/**
+ * ixgbe_vf_handle_tx_hang - handle Tx hang on VF
+ * @adapter: structure containing ring specific data
+ * @vf: VF index
+ *
+ * Print a message containing details about malicious driver detection.
+ * Set malicious VF link down if the detection happened several times.
+ */
+static void ixgbe_vf_handle_tx_hang(struct ixgbe_adapter *adapter, u16 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ e_warn(drv,
+ "Malicious Driver Detection tx hang detected on PF %d VF %d MAC: %pM",
+ hw->bus.func, vf, adapter->vfinfo[vf].vf_mac_addresses);
+
+ adapter->tx_hang_count[vf]++;
+ if (adapter->tx_hang_count[vf] == IXGBE_MAX_TX_VF_HANGS) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->tx_hang_count[vf] = 0;
+ }
+}
+
+static u32 ixgbe_poll_tx_icache(struct ixgbe_hw *hw, u16 queue, u16 idx)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_TXDESCIC, queue * idx);
+ return IXGBE_READ_REG(hw, IXGBE_TXDESCIC);
+}
+
+/**
+ * ixgbe_check_illegal_queue - search for queue with illegal packet
+ * @adapter: structure containing ring specific data
+ * @queue: queue index
+ *
+ * Check if a Tx descriptor associated with the input queue
+ * contains an illegal packet.
+ *
+ * Returns: true if the queue contains an illegal packet.
+ */
+static bool ixgbe_check_illegal_queue(struct ixgbe_adapter *adapter,
+ u16 queue)
+{
+ u32 hdr_len_reg, mss_len_reg, type_reg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mss_len, header_len, reg;
+
+ for (u16 i = 0; i < IXGBE_MAX_TX_DESCRIPTORS; i++) {
+ /* HW will clear bit IXGBE_TXDESCIC_READY when address
+ * is written to address field. HW will set this bit
+ * when iCache read is done, and data is ready at TIC_DWx.
+ * Set descriptor address.
+ */
+ read_poll_timeout(ixgbe_poll_tx_icache, reg,
+ !(reg & IXGBE_TXDESCIC_READY), 0, 0, false,
+ hw, queue, i);
+
+ /* read tx descriptor access registers */
+ hdr_len_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_VLAN_MACIP_LENS_REG));
+ type_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_TYPE_TUCMD_MLHL));
+ mss_len_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_MSS_L4LEN_IDX));
+
+ /* check if Advanced Context Descriptor */
+ if (FIELD_GET(IXGBE_ADVTXD_DTYP_MASK, type_reg) !=
+ IXGBE_ADVTXD_DTYP_CTXT)
+ continue;
+
+ /* check for illegal MSS and Header length */
+ mss_len = FIELD_GET(IXGBE_ADVTXD_MSS_MASK, mss_len_reg);
+ header_len = FIELD_GET(IXGBE_ADVTXD_HEADER_LEN_MASK,
+ hdr_len_reg);
+ if ((mss_len + header_len) > SZ_16K) {
+ e_warn(probe, "mss len + header len too long\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_handle_mdd_event - handle mdd event
+ * @adapter: structure containing ring specific data
+ * @tx_ring: tx descriptor ring to handle
+ *
+ * Reset the VF driver if a malicious VF is detected or an
+ * illegal packet is detected in any queue.
+ */
+static void ixgbe_handle_mdd_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ u16 vf, q;
+
+ if (adapter->vfinfo && ixgbe_check_mdd_event(adapter)) {
+ /* vf mdd info and malicious vf detected */
+ if (!ixgbe_get_vf_idx(adapter, tx_ring->queue_index, &vf))
+ ixgbe_vf_handle_tx_hang(adapter, vf);
+ } else {
+ /* malicious vf not detected */
+ for (q = 0; q < IXGBE_MAX_TX_QUEUES; q++) {
+ if (ixgbe_check_illegal_queue(adapter, q) &&
+ !ixgbe_get_vf_idx(adapter, q, &vf))
+ /* illegal queue detected */
+ ixgbe_vf_handle_tx_hang(adapter, vf);
+ }
+ }
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1265,26 +1452,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang %s\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- ring_is_xdp(tx_ring) ? "(XDP)" : "",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
- IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
- tx_ring->next_to_use, i,
- tx_ring->tx_buffer_info[i].time_stamp, jiffies);
-
- if (!ring_is_xdp(tx_ring))
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_handle_mdd_event(adapter, tx_ring);
+
+ ixgbe_pf_handle_tx_hang(tx_ring, i);
e_info(probe,
"tx hang %d detected on queue %d, resetting adapter\n",
@@ -2200,7 +2371,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points extactly as xdp->data, otherwise we
+ * is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
@@ -2323,7 +2494,7 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
- * it by maintaining the mapping of the page to the syste.
+ * it by maintaining the mapping of the page to the system.
*
* Returns amount of work completed
**/
@@ -3933,8 +4104,12 @@ void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
- int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -3956,6 +4131,9 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
}
+
+ if (hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -4885,7 +5063,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* pull VLAN ID from VLVF */
vid = vlvf & VLAN_VID_MASK;
- /* only concern outselves with a certain range */
+ /* only concern ourselves with a certain range */
if (vid < vid_start || vid >= vid_end)
continue;
@@ -7964,6 +8142,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ if (adapter->num_vfs && hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
+
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
@@ -7991,6 +8172,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (!netif_carrier_ok(netdev))
return;
+ adapter->link_down_events++;
+
/* poll for SFP+ cable when link is down */
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -9439,10 +9622,6 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (cmd) {
- case SIOCSHWTSTAMP:
- return ixgbe_ptp_set_ts_config(adapter, req);
- case SIOCGHWTSTAMP:
- return ixgbe_ptp_get_ts_config(adapter, req);
case SIOCGMIIPHY:
if (!adapter->hw.phy.ops.read_reg)
return -EOPNOTSUPP;
@@ -10906,6 +11085,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xsk_wakeup = ixgbe_xsk_wakeup,
+ .ndo_hwtstamp_get = ixgbe_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ixgbe_ptp_hwtstamp_set,
};
static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
@@ -11108,7 +11289,7 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
* ixgbe_enumerate_functions - Get the number of ports this device has
* @adapter: adapter structure
*
- * This function enumerates the phsyical functions co-located on a single slot,
+ * This function enumerates the physical functions co-located on a single slot,
* in order to determine how many ports a device has. This is most useful in
* determining the required GT/s of PCIe bandwidth necessary for optimal
* performance.
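A note on the MDD handling added above: ixgbe_get_vf_idx() maps a Tx queue to its owning VF by integer division with the per-VF queue count derived from the GCR_EXT VT mode. A minimal sketch of that arithmetic (illustrative only; the helper name is hypothetical):

/* With a 32-VF pool each VF owns IXGBE_32VFS_QUEUES (4) queues,
 * so Tx queue 10 maps to VF 10 / 4 = 2.
 */
static inline u16 example_queue_to_vf(u16 queue, u8 queues_per_vf)
{
	return queue / queues_per_vf;
}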
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index bf65e82b4c61..4af149b63a39 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -34,7 +34,7 @@
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
+/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2d54828bdfbb..2449e4cf2679 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1323,7 +1323,7 @@ int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* @hw: pointer to hardware structure
*
* Restart autonegotiation and PHY and waits for completion.
- * This function always returns success, this is nessary since
+ * This function always returns success, this is necessary since
* it is called via a function pointer that could call other
* functions that could return an error.
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index eef25e11d938..114dd88fc71c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -327,7 +327,7 @@ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter)
* result of SYSTIME is 32bits of "billions of cycles" and 32 bits of
* "cycles", rather than seconds and nanoseconds.
*/
-static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_X550(struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
@@ -364,7 +364,7 @@ static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_82599(struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
@@ -936,20 +936,22 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector,
}
/**
- * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration
- * @adapter: pointer to adapter structure
- * @ifr: ioctl data
+ * ixgbe_ptp_hwtstamp_get - get current hardware timestamping configuration
+ * @netdev: pointer to net device structure
+ * @config: timestamping configuration structure
*
* This function returns the current timestamping settings. Rather than
* attempt to deconstruct registers to fill in the values, simply keep a copy
* of the old settings around, and return a copy when requested.
*/
-int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &adapter->tstamp_config;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/**
@@ -978,7 +980,7 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* mode, if required to support the specifically requested mode.
*/
static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
@@ -1129,31 +1131,29 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
- * @adapter: pointer to adapter struct
- * @ifr: ioctl data
+ * ixgbe_ptp_hwtstamp_set - user entry point for timestamp mode
+ * @netdev: pointer to net device structure
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
*/
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ err = ixgbe_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter,
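The timestamping ABI seen by applications is unchanged by the kernel_hwtstamp_config conversion; only the copy_{from,to}_user handling moves from the driver into the core, which then calls the new ndo_hwtstamp_get/set hooks. A minimal userspace sketch (illustrative, standard net_tstamp UAPI only) that exercises this path:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* sock is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
static int enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}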
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 0dbbd2befd4d..32ac1e020d91 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -207,6 +207,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
int rss;
@@ -237,6 +238,9 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
+
#ifdef CONFIG_PCI_IOV
/*
* If our VFs are assigned we cannot shut down SR-IOV
@@ -702,7 +706,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u32 reg_val;
u32 queue;
- /* remove VLAN filters beloning to this VF */
+ /* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
/* add back PF assigned VLAN or VLAN 0 */
@@ -1353,12 +1357,59 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_mbx(hw, &msg, 1, vf);
}
+/**
+ * ixgbe_check_mdd_event - check for MDD event on all VFs
+ * @adapter: pointer to ixgbe adapter
+ *
+ * Return: true if there is a VF on which an MDD event occurred, false otherwise.
+ */
+bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ DECLARE_BITMAP(vf_bitmap, 64);
+ bool ret = false;
+ int i;
+
+ if (!hw->mac.ops.handle_mdd)
+ return false;
+
+ /* Did we have a malicious event */
+ bitmap_zero(vf_bitmap, 64);
+ hw->mac.ops.handle_mdd(hw, vf_bitmap);
+
+ /* Log any blocked queues */
+ for_each_set_bit(i, vf_bitmap, 64) {
+ dev_warn(&adapter->pdev->dev,
+ "Malicious event on VF %d tx:%x rx:%x\n", i,
+ IXGBE_READ_REG(hw, IXGBE_LVMMC_TX),
+ IXGBE_READ_REG(hw, IXGBE_LVMMC_RX));
+
+ if (hw->mac.ops.restore_mdd_vf) {
+ u32 ping;
+
+ hw->mac.ops.restore_mdd_vf(hw, i);
+
+ /* get the VF to rebuild its queues */
+ adapter->vfinfo[i].clear_to_send = 0;
+ ping = IXGBE_PF_CONTROL_MSG |
+ IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+
+ ret = true;
+ }
+
+ return ret;
+}
+
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
u32 vf;
+ ixgbe_check_mdd_event(adapter);
+
spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 0690ecb8dfa3..bc4cab976bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -15,6 +15,7 @@
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
+bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 892fa6c1f879..36577091cd9e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -238,7 +238,7 @@ struct ixgbe_thermal_sensor_data {
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETK_VALID 0x8000
#define NVM_INVALID_PTR 0xFFFF
-#define NVM_VER_SIZE 32 /* version sting size */
+#define NVM_VER_SIZE 32 /* version string size */
struct ixgbe_nvm_version {
u32 etk_id;
@@ -402,6 +402,8 @@ struct ixgbe_nvm_version {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
@@ -1042,6 +1044,7 @@ struct ixgbe_nvm_version {
#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
@@ -2021,7 +2024,7 @@ enum {
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
-#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
#define IXGBE_EEC_SIZE_SHIFT 11
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
@@ -2746,6 +2749,28 @@ enum ixgbe_fdir_pballoc_type {
#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+/* There are only 3 options for VF creation on this device:
+ * a pool of 16 VFs with 8 queues each
+ * a pool of 32 VFs with 4 queues each
+ * a pool of 64 VFs with 2 queues each
+ *
+ * That means the VF-to-queue mapping registers must be read differently
+ * depending on the chosen option. Define values that help deal with each
+ * scenario (a worked example follows the defines below).
+ */
+/* Number of queues based on VFs pool */
+#define IXGBE_16VFS_QUEUES 8
+#define IXGBE_32VFS_QUEUES 4
+#define IXGBE_64VFS_QUEUES 2
+/* Mask for getting queues bits based on VFs pool */
+#define IXGBE_16VFS_BITMASK GENMASK(IXGBE_16VFS_QUEUES - 1, 0)
+#define IXGBE_32VFS_BITMASK GENMASK(IXGBE_32VFS_QUEUES - 1, 0)
+#define IXGBE_64VFS_BITMASK GENMASK(IXGBE_64VFS_QUEUES - 1, 0)
+/* Convert queue index to register number.
+ * We have 4 registers with 32 queues in each.
+ */
+#define IXGBE_QUEUES_PER_REG 32
+#define IXGBE_QUEUES_REG_AMOUNT 4
+
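Worked example of the mapping above (it mirrors the computation done in ixgbe_restore_mdd_vf_x550() later in this patch): with a 32-VF pool, each VF owns IXGBE_32VFS_QUEUES = 4 queues, so VF 5 owns queues 20-23, which live in WQBR register 0 with bits 20-23 set.

	u32 vf = 5;
	u32 start_q = vf * IXGBE_32VFS_QUEUES;			/* 20 */
	u32 idx = start_q / IXGBE_QUEUES_PER_REG;		/* register 0 */
	u32 val = IXGBE_32VFS_BITMASK << (start_q % IXGBE_QUEUES_PER_REG); /* 0xF << 20 */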
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
u8 cmd;
@@ -2911,6 +2936,13 @@ struct ixgbe_adv_tx_context_desc {
__le32 mss_l4len_idx;
};
+enum {
+ IXGBE_VLAN_MACIP_LENS_REG = 0,
+ IXGBE_FCEOF_SAIDX_REG = 1,
+ IXGBE_TYPE_TUCMD_MLHL = 2,
+ IXGBE_MSS_L4LEN_IDX = 3,
+};
+
/* Adv Transmit Descriptor Config Masks */
#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
@@ -2918,7 +2950,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x2 /* Advanced Context Desc */
#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
@@ -2967,6 +2999,8 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_MSS_MASK GENMASK(31, IXGBE_ADVTXD_MSS_SHIFT)
+#define IXGBE_ADVTXD_HEADER_LEN_MASK GENMASK(8, 0)
/* Autonegotiation advertised speeds */
typedef u32 ixgbe_autoneg_advertised;
@@ -3539,6 +3573,12 @@ struct ixgbe_mac_operations {
int (*dmac_config_tcs)(struct ixgbe_hw *hw);
int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+
+ /* MDD events */
+ void (*enable_mdd)(struct ixgbe_hw *hw);
+ void (*disable_mdd)(struct ixgbe_hw *hw);
+ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+ void (*handle_mdd)(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
};
struct ixgbe_phy_operations {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 09df67f03cf4..d2f22d8558f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -4,6 +4,8 @@
#ifndef _IXGBE_TYPE_E610_H_
#define _IXGBE_TYPE_E610_H_
+#include <linux/net/intel/libie/adminq.h>
+
#define BYTES_PER_DWORD 4
/* General E610 defines */
@@ -135,60 +137,6 @@
/* [ms] timeout of waiting for resource release */
#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
-/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
-#define IXGBE_ACI_LG_BUF 512
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */
-#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */
-#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */
-#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */
-#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */
-#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */
-#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */
-#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */
-#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */
-#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */
-#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */
-
-/* Admin Command Interface (ACI) error codes */
-enum ixgbe_aci_err {
- IXGBE_ACI_RC_OK = 0, /* Success */
- IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
- IXGBE_ACI_RC_ENOENT = 2, /* No such element */
- IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
- IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
- IXGBE_ACI_RC_EIO = 5, /* I/O error */
- IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
- IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
- IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
- IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
- IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
- IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
- IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
- IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
- IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
- IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
- IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */
- IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
- IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
- IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
- IXGBE_ACI_RC_EFBIG = 22, /* File too big */
- IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
- IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
- IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
- IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
- IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
- IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
- IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
-};
-
/* Admin Command Interface (ACI) opcodes */
enum ixgbe_aci_opc {
ixgbe_aci_opc_get_ver = 0x0001,
@@ -265,33 +213,8 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_clear_health_status = 0xFF23,
};
-/* Get version (direct 0x0001) */
-struct ixgbe_aci_cmd_get_ver {
- __le32 rom_ver;
- __le32 fw_build;
- u8 fw_branch;
- u8 fw_major;
- u8 fw_minor;
- u8 fw_patch;
- u8 api_branch;
- u8 api_major;
- u8 api_minor;
- u8 api_patch;
-};
-
#define IXGBE_DRV_VER_STR_LEN_E610 32
-/* Send driver version (indirect 0x0002) */
-struct ixgbe_aci_cmd_driver_ver {
- u8 major_ver;
- u8 minor_ver;
- u8 build_ver;
- u8 subbuild_ver;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
/* Get Expanded Error Code (0x0005, direct) */
struct ixgbe_aci_cmd_get_exp_err {
__le32 reason;
@@ -303,98 +226,6 @@ struct ixgbe_aci_cmd_get_exp_err {
/* FW update timeout definitions are in milliseconds */
#define IXGBE_NVM_TIMEOUT 180000
-enum ixgbe_aci_res_access_type {
- IXGBE_RES_READ = 1,
- IXGBE_RES_WRITE
-};
-
-enum ixgbe_aci_res_ids {
- IXGBE_NVM_RES_ID = 1,
- IXGBE_SPD_RES_ID,
- IXGBE_CHANGE_LOCK_RES_ID,
- IXGBE_GLOBAL_CFG_LOCK_RES_ID
-};
-
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct ixgbe_aci_cmd_req_res {
- __le16 res_id;
- __le16 access_type;
-
- /* Upon successful completion, FW writes this value and driver is
- * expected to release resource before timeout. This value is provided
- * in milliseconds.
- */
- __le32 timeout;
-#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
-#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
-#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
-#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin ID of the SDP */
- __le32 res_number;
- __le16 status;
-#define IXGBE_ACI_RES_GLBL_SUCCESS 0
-#define IXGBE_ACI_RES_GLBL_IN_PROG 1
-#define IXGBE_ACI_RES_GLBL_DONE 2
- u8 reserved[2];
-};
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct ixgbe_aci_cmd_list_caps {
- u8 cmd_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Device/Function buffer entry, repeated per reported capability */
-struct ixgbe_aci_cmd_list_caps_elem {
- __le16 cap;
-#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
-#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
-#define IXGBE_ACI_CAPS_SRIOV 0x0012
-#define IXGBE_ACI_CAPS_VF 0x0013
-#define IXGBE_ACI_CAPS_VMDQ 0x0014
-#define IXGBE_ACI_CAPS_VSI 0x0017
-#define IXGBE_ACI_CAPS_DCB 0x0018
-#define IXGBE_ACI_CAPS_RSS 0x0040
-#define IXGBE_ACI_CAPS_RXQS 0x0041
-#define IXGBE_ACI_CAPS_TXQS 0x0042
-#define IXGBE_ACI_CAPS_MSIX 0x0043
-#define IXGBE_ACI_CAPS_FD 0x0045
-#define IXGBE_ACI_CAPS_1588 0x0046
-#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
-#define IXGBE_ACI_CAPS_NVM_VER 0x0048
-#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049
-#define IXGBE_ACI_CAPS_OROM_VER 0x004A
-#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B
-#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D
-#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
-#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
-#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
-#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
-#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
-#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
-#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
-#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
-#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
- u8 major_ver;
- u8 minor_ver;
- /* Number of resources described by this capability */
- __le32 number;
- /* Only meaningful for some types of resources */
- __le32 logical_id;
- /* Only meaningful for some types of resources */
- __le32 phys_id;
- __le64 rsvd1;
- __le64 rsvd2;
-};
-
/* Disable RXEN (direct 0x000C) */
struct ixgbe_aci_cmd_disable_rxen {
u8 lport_num;
@@ -960,55 +791,6 @@ struct ixgbe_aci_cmd_nvm_comp_tbl {
u8 cvs[]; /* Component Version String */
} __packed;
-/**
- * struct ixgbe_aci_desc - Admin Command (AC) descriptor
- * @flags: IXGBE_ACI_FLAG_* flags
- * @opcode: Admin command opcode
- * @datalen: length in bytes of indirect/external data buffer
- * @retval: return value from firmware
- * @cookie_high: opaque data high-half
- * @cookie_low: opaque data low-half
- * @params: command-specific parameters
- *
- * Descriptor format for commands the driver posts via the
- * Admin Command Interface (ACI).
- * The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
- * result of the command are written to the Admin Command Interface (ACI) using
- * the same descriptor format. Descriptors are in little-endian notation with
- * 32-bit words.
- */
-struct ixgbe_aci_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- u8 raw[16];
- struct ixgbe_aci_cmd_get_ver get_ver;
- struct ixgbe_aci_cmd_driver_ver driver_ver;
- struct ixgbe_aci_cmd_get_exp_err exp_err;
- struct ixgbe_aci_cmd_req_res res_owner;
- struct ixgbe_aci_cmd_list_caps get_cap;
- struct ixgbe_aci_cmd_disable_rxen disable_rxen;
- struct ixgbe_aci_cmd_get_phy_caps get_phy;
- struct ixgbe_aci_cmd_set_phy_cfg set_phy;
- struct ixgbe_aci_cmd_restart_an restart_an;
- struct ixgbe_aci_cmd_get_link_status get_link_status;
- struct ixgbe_aci_cmd_set_event_mask set_event_mask;
- struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
- struct ixgbe_aci_cmd_get_link_topo get_link_topo;
- struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
- struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
- struct ixgbe_aci_cmd_nvm nvm;
- struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
- struct ixgbe_aci_cmd_nvm_pkg_data pkg_data;
- struct ixgbe_aci_cmd_nvm_pass_comp_tbl pass_comp_tbl;
- } params;
-};
-
/* E610-specific adapter context structures */
struct ixgbe_link_status {
@@ -1172,7 +954,7 @@ struct ixgbe_hw_dev_caps {
/* ACI event information */
struct ixgbe_aci_event {
- struct ixgbe_aci_desc desc;
+ struct libie_aq_desc desc;
u8 *msg_buf;
u16 msg_len;
u16 buf_len;
@@ -1180,7 +962,7 @@ struct ixgbe_aci_event {
struct ixgbe_aci_info {
struct mutex lock; /* admin command interface lock */
- enum ixgbe_aci_err last_status; /* last status of sent admin command */
+ enum libie_aq_err last_status; /* last status of sent admin command */
};
enum ixgbe_bank_select {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f1ab95aa8c83..c2353aed0120 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -47,7 +47,7 @@ int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires
+ * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 1d2acdb64f45..bfa647086c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -20,7 +20,7 @@ static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_link_info *link = &hw->link;
- /* Start with X540 invariants, since so simular */
+	/* Start with X540 invariants, since they are so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -48,7 +48,7 @@ static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- /* Start with X540 invariants, since so simular */
+	/* Start with X540 invariants, since they are so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -685,7 +685,7 @@ static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
return 0;
}
-/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of the
* IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
@@ -847,7 +847,7 @@ static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
* @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
+ * @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
@@ -1253,7 +1253,7 @@ static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
/**
* ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
- * @hw: pointer t hardware structure
+ * @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
*/
@@ -1267,7 +1267,7 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
/** ixgbe_disable_rx_x550 - Disable RX unit
*
- * Enables the Rx DMA unit for x550
+ * Disables the Rx DMA unit for x550
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
@@ -1754,7 +1754,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+	 * SFP not present is not treated as an error in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -1804,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+	 * SFP not present is not treated as an error in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -2324,7 +2324,7 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* PHY interrupt is lsc
* @is_overtemp: indicate whether an overtemp event encountered
*
- * Determime if external Base T PHY interrupt cause is high temperature
+ * Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
@@ -2669,7 +2669,7 @@ static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (status)
return status;
- /* If link is not still up, then no setup is necessary so return */
+ /* If the link is still not up, no setup is necessary */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
@@ -2768,7 +2768,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* Sends driver version number to firmware through the manageability
* block. On success return 0
* else returns -EBUSY when encountering an error acquiring
- * semaphore, -EIO when command fails or -ENIVAL when incorrect
+ * semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
@@ -3175,7 +3175,7 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
- * PHY address. This register field was has only been used for X552.
+ * PHY address. This register field has only been used for X552.
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
@@ -3316,7 +3316,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
return media_type;
}
-/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+/** ixgbe_init_ext_t_x550em - Start (un-stall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
@@ -3735,7 +3735,7 @@ static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Release the SWFW semaphore and puts the shared PHY token as needed
+ * Release the SWFW semaphore and put back the shared PHY token as needed
*/
static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
@@ -3756,7 +3756,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
- * Token. The PHY Token is needed since the MDIO is shared between to MAC
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
@@ -3800,6 +3800,122 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
+static void ixgbe_set_mdd_x550(struct ixgbe_hw *hw, bool ena)
+{
+ u32 reg_dma, reg_rdr;
+
+ reg_dma = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg_rdr = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ if (ena) {
+ reg_dma |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ reg_rdr |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ } else {
+ reg_dma &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ reg_rdr &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_dma);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_rdr);
+}
+
+/**
+ * ixgbe_enable_mdd_x550 - enable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw)
+{
+ ixgbe_set_mdd_x550(hw, true);
+}
+
+/**
+ * ixgbe_disable_mdd_x550 - disable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw)
+{
+ ixgbe_set_mdd_x550(hw, false);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_x550 - restore VF that was disabled during MDD event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ */
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 idx, reg, val, num_qs, start_q, bitmask;
+
+ /* Map VF to queues */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ num_qs = IXGBE_16VFS_QUEUES;
+ bitmask = IXGBE_16VFS_BITMASK;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ num_qs = IXGBE_32VFS_QUEUES;
+ bitmask = IXGBE_32VFS_BITMASK;
+ break;
+ default:
+ num_qs = IXGBE_64VFS_QUEUES;
+ bitmask = IXGBE_64VFS_BITMASK;
+ break;
+ }
+ start_q = vf * num_qs;
+
+ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+ idx = start_q / IXGBE_QUEUES_PER_REG;
+ val = bitmask << (start_q % IXGBE_QUEUES_PER_REG);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), val);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), val);
+}
+
+/**
+ * ixgbe_handle_mdd_x550 - handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: output vf bitmap of malicious vfs
+ */
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
+{
+ u32 i, j, reg, q, div, vf;
+ unsigned long wqbr;
+
+	/* figure out the pool size for mapping queues to VFs */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ div = IXGBE_16VFS_QUEUES;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ div = IXGBE_32VFS_QUEUES;
+ break;
+ default:
+ div = IXGBE_64VFS_QUEUES;
+ break;
+ }
+
+ /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+ for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
+ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
+ IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+ if (!wqbr)
+ continue;
+
+ /* Get malicious queue */
+ for_each_set_bit(j, (unsigned long *)&wqbr,
+ IXGBE_QUEUES_PER_REG) {
+ /* Get queue from bitmask */
+ q = j + (i * IXGBE_QUEUES_PER_REG);
+ /* Map queue to vf */
+ vf = q / div;
+ set_bit(vf, vf_bitmap);
+ }
+ }
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -3863,6 +3979,10 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
.prot_autoc_write = prot_autoc_write_generic,
.setup_fc = ixgbe_setup_fc_generic,
.fc_autoneg = ixgbe_fc_autoneg,
+ .enable_mdd = ixgbe_enable_mdd_x550,
+ .disable_mdd = ixgbe_disable_mdd_x550,
+ .restore_mdd_vf = ixgbe_restore_mdd_vf_x550,
+ .handle_mdd = ixgbe_handle_mdd_x550,
};
static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
index 3e4092f8da3e..2a11147fb1bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -17,4 +17,9 @@ void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
bool enable, int vf);
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
+
#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4384e892f967..3a379e6a3a2a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -346,7 +346,6 @@ struct ixgbevf_adapter {
int num_rx_queues;
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
int num_msix_vectors;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -363,8 +362,6 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- /* Interrupt Throttle Rate */
- u32 eitr_param;
struct ixgbevf_hw_stats stats;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index da7a72ecce7a..dcaef34b88b6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -255,7 +255,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
memset(msgbuf, 0, sizeof(msgbuf));
/* If index is one then this is the start of a new list and needs
- * indication to the PF so it can do it's own list management.
+ * indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
index 480293b71dbc..2445b979c499 100644
--- a/drivers/net/ethernet/intel/libeth/Kconfig
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -1,9 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
config LIBETH
- tristate
+ tristate "Common Ethernet library (libeth)" if COMPILE_TEST
select PAGE_POOL
help
libeth is a common library containing routines shared between several
drivers, but not yet promoted to the generic kernel API.
+
+config LIBETH_XDP
+ tristate "Common XDP library (libeth_xdp)" if COMPILE_TEST
+ select LIBETH
+ help
+ XDP and XSk helpers based on libeth hotpath management.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
index 52492b081132..350bc0b38bad 100644
--- a/drivers/net/ethernet/intel/libeth/Makefile
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
obj-$(CONFIG_LIBETH) += libeth.o
libeth-y := rx.o
+libeth-y += tx.o
+
+obj-$(CONFIG_LIBETH_XDP) += libeth_xdp.o
+
+libeth_xdp-y += xdp.o
+libeth_xdp-y += xsk.o
diff --git a/drivers/net/ethernet/intel/libeth/priv.h b/drivers/net/ethernet/intel/libeth/priv.h
new file mode 100644
index 000000000000..9b811d31015c
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/priv.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_PRIV_H
+#define __LIBETH_PRIV_H
+
+#include <linux/types.h>
+
+/* XDP */
+
+enum xdp_action;
+struct libeth_xdp_buff;
+struct libeth_xdp_tx_frame;
+struct skb_shared_info;
+struct xdp_frame_bulk;
+
+extern const struct xsk_tx_metadata_ops libeth_xsktmo_slow;
+
+void libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count);
+u32 libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp, enum xdp_action act,
+ int ret);
+
+struct libeth_xdp_ops {
+ void (*bulk)(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+ void (*xsk)(struct libeth_xdp_buff *xdp);
+};
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops);
+
+static inline void libeth_detach_xdp(void)
+{
+ libeth_attach_xdp(NULL);
+}
+
+#endif /* __LIBETH_PRIV_H */
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 66d1d23b8ad2..62521a1f4ec9 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <linux/export.h>
#include <net/libeth/rx.h>
@@ -68,7 +72,7 @@ static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
struct page_pool_params *pp)
{
- pp->offset = LIBETH_SKB_HEADROOM;
+ pp->offset = fq->xdp ? LIBETH_XDP_HEADROOM : LIBETH_SKB_HEADROOM;
/* HW-writeable / syncable length per one page */
pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
@@ -155,11 +159,12 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
.dev = napi->dev->dev.parent,
.netdev = napi->dev,
.napi = napi,
- .dma_dir = DMA_FROM_DEVICE,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
- bool ret;
+ int ret;
+
+ pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
if (!fq->hsplit)
ret = libeth_rx_page_pool_params(fq, &pp);
@@ -173,20 +178,28 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
return PTR_ERR(pool);
fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
- if (!fqes)
+ if (!fqes) {
+ ret = -ENOMEM;
goto err_buf;
+ }
+
+ ret = xdp_reg_page_pool(pool);
+ if (ret)
+ goto err_mem;
fq->fqes = fqes;
fq->pp = pool;
return 0;
+err_mem:
+ kvfree(fqes);
err_buf:
page_pool_destroy(pool);
- return -ENOMEM;
+ return ret;
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_fq_create);
/**
* libeth_rx_fq_destroy - destroy a &page_pool created by libeth
@@ -194,22 +207,23 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH");
*/
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
+ xdp_unreg_page_pool(fq->pp);
kvfree(fq->fqes);
page_pool_destroy(fq->pp);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_fq_destroy);
/**
- * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
- * @page: page to recycle
+ * libeth_rx_recycle_slow - recycle libeth netmem
+ * @netmem: network memory to recycle
*
* To be used on exceptions or rare cases not requiring fast inline recycling.
*/
-void libeth_rx_recycle_slow(struct page *page)
+void __cold libeth_rx_recycle_slow(netmem_ref netmem)
{
- page_pool_recycle_direct(page->pp, page);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_recycle_slow);
/* Converting abstract packet type numbers into a software structure with
* the packet parameters to do O(1) lookup on Rx.
@@ -251,7 +265,7 @@ void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_pt_gen_hash_type);
/* Module */
diff --git a/drivers/net/ethernet/intel/libeth/tx.c b/drivers/net/ethernet/intel/libeth/tx.c
new file mode 100644
index 000000000000..e0167f43d2a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/tx.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* Tx buffer completion */
+
+DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk);
+DEFINE_STATIC_CALL_NULL(xsk, libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_tx_complete_any - perform Tx completion for one SQE of any type
+ * @sqe: Tx buffer to complete
+ * @cp: polling params
+ *
+ * Can be used to complete both regular and XDP SQEs, for example when
+ * destroying queues.
+ * When libeth_xdp is not loaded, XDPSQEs won't be handled.
+ */
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp)
+{
+ if (sqe->type >= __LIBETH_SQE_XDP_START)
+ __libeth_xdp_complete_tx(sqe, cp, static_call(bulk),
+ static_call(xsk));
+ else
+ libeth_tx_complete(sqe, cp);
+}
+EXPORT_SYMBOL_GPL(libeth_tx_complete_any);
+
+/* Module */
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops)
+{
+ static_call_update(bulk, ops ? ops->bulk : NULL);
+ static_call_update(xsk, ops ? ops->xsk : NULL);
+}
+EXPORT_SYMBOL_GPL(libeth_attach_xdp);
diff --git a/drivers/net/ethernet/intel/libeth/xdp.c b/drivers/net/ethernet/intel/libeth/xdp.c
new file mode 100644
index 000000000000..d4ac027d9584
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xdp.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* XDPSQ sharing */
+
+DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ bool warn;
+
+ spin_lock_init(&lock->lock);
+ lock->share = true;
+
+ warn = !static_key_enabled(&libeth_xdpsq_share);
+ static_branch_inc(&libeth_xdpsq_share);
+
+ if (warn && net_ratelimit())
+ netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
+
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ static_branch_dec(&libeth_xdpsq_share);
+
+ if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
+ netdev_notice(dev, "XDPSQ sharing disabled\n");
+
+ lock->share = false;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
+
+void __acquires(&lock->lock)
+__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ spin_lock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
+
+void __releases(&lock->lock)
+__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
+
+/* XDPSQ clean-up timers */
+
+/**
+ * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
+ * @timer: timer to initialize
+ * @xdpsq: queue this timer belongs to
+ * @lock: corresponding XDPSQ lock
+ * @poll: queue polling/completion function
+ *
+ * XDPSQ clean-up timers must be set up at queue configuration time, before
+ * the queue is used. Set the required pointers and the cleaning callback.
+ */
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work))
+{
+ timer->xdpsq = xdpsq;
+ timer->lock = lock;
+
+ INIT_DELAYED_WORK(&timer->dwork, poll);
+}
+EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
+
+/* ``XDP_TX`` bulking */
+
+static void __cold
+libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
+{
+ if (frm->len_fl & LIBETH_XDP_TX_MULTI)
+ libeth_xdp_return_frags(frm->data + frm->soff, true);
+
+ libeth_xdp_return_va(frm->data, true);
+}
+
+static void __cold
+libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
+{
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+
+ if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
+ continue;
+
+ libeth_xdp_tx_return_one(frm);
+ }
+}
+
+static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
+ const struct bpf_prog *prog,
+ u32 act)
+{
+ trace_xdp_exception(dev, prog, act);
+}
+
+/**
+ * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
+ * @bq: XDP Tx frame bulk
+ * @sent: number of frames sent successfully (from this bulk)
+ * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
+ *
+ * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
+ * Reports XDP Tx exceptions, frees the frames that won't be sent, or adjusts
+ * the Tx bulk to try again later.
+ */
+void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags)
+{
+ const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
+ u32 left = bq->count - sent;
+
+ if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);
+
+ if (!(flags & LIBETH_XDP_TX_DROP)) {
+ memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
+ bq->count = left;
+
+ return;
+ }
+
+ if (flags & LIBETH_XDP_TX_XSK)
+ libeth_xsk_tx_return_bulk(pos, left);
+ else if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_xdp_tx_return_bulk(pos, left);
+ else
+ libeth_xdp_xmit_return_bulk(pos, left, bq->dev);
+
+ bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
+
+/* .ndo_xdp_xmit() implementation */
+
+u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev)
+{
+ u32 n = 0;
+
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+ dma_addr_t dma;
+
+ if (frm->flags & LIBETH_XDP_TX_FIRST)
+ dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
+ else
+ dma = dma_unmap_addr(frm, dma);
+
+ dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
+ DMA_TO_DEVICE);
+
+ /* Actual xdp_frames are freed by the core */
+ n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
+ }
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff to initialize
+ * @src: source stash
+ *
+ * External helper used by libeth_xdp_init_buff(), do not call directly.
+ * Recreate an onstack &libeth_xdp_buff using the stash saved earlier.
+ * The only field untouched (rxq) is initialized later in the
+ * abovementioned function.
+ */
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src)
+{
+ dst->data = src->data;
+ dst->base.data_end = src->data + src->len;
+ dst->base.data_meta = src->data;
+ dst->base.data_hard_start = src->data - src->headroom;
+
+ dst->base.frame_sz = src->frame_sz;
+ dst->base.flags = src->flags;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);
+
+/**
+ * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff_stash to initialize
+ * @src: source XDP buffer
+ *
+ * External helper used by libeth_xdp_save_buff(), do not call directly.
+ * Use the fields from the passed XDP buffer to initialize the stash on the
+ * queue, so that a partially received frame can be finished later during
+ * the next NAPI poll.
+ */
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ dst->data = src->data;
+ dst->headroom = src->data - src->base.data_hard_start;
+ dst->len = src->base.data_end - src->data;
+
+ dst->frame_sz = src->base.frame_sz;
+ dst->flags = src->base.flags;
+
+ WARN_ON_ONCE(dst->flags != src->base.flags);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);
+
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+
+ libeth_xdp_load_stash(xdp, stash);
+ libeth_xdp_return_buff_slow(xdp);
+
+ stash->data = NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);
+
+/**
+ * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
+ * @xdp: buffer to free/return
+ *
+ * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
+ * queue clean-ups etc., without unwanted inlining.
+ */
+void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
+{
+ __libeth_xdp_return_buff(xdp, false);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
+
+/**
+ * libeth_xdp_buff_add_frag - add frag to XDP buffer
+ * @xdp: head XDP buffer
+ * @fqe: Rx buffer containing the frag
+ * @len: frag length reported by HW
+ *
+ * External helper used by libeth_xdp_process_buff(), do not call directly.
+ * Frees both head and frag buffers on error.
+ *
+ * Return: true on success, false on error (no space for a new frag).
+ */
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ netmem_ref netmem = fqe->netmem;
+
+ if (!xdp_buff_add_frag(&xdp->base, netmem,
+ fqe->offset + netmem_get_pp(netmem)->p.offset,
+ len, fqe->truesize))
+ goto recycle;
+
+ return true;
+
+recycle:
+ libeth_rx_recycle_slow(netmem);
+ libeth_xdp_return_buff_slow(xdp);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);
+
+/**
+ * libeth_xdp_prog_exception - handle XDP prog exceptions
+ * @bq: XDP Tx bulk
+ * @xdp: buffer to process
+ * @act: original XDP prog verdict
+ * @ret: error code if redirect failed
+ *
+ * External helper used by __libeth_xdp_run_prog() and
+ * __libeth_xsk_run_prog_slow(), do not call directly.
+ * Reports an invalid @act, emits the XDP exception trace event and frees the buffer.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ if (act > XDP_REDIRECT)
+ bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);
+
+ libeth_trace_xdp_exception(bq->dev, bq->prog, act);
+
+ if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return libeth_xsk_prog_exception(xdp, act, ret);
+
+ libeth_xdp_return_buff_slow(xdp);
+
+ return LIBETH_XDP_DROP;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);
+
+/* Tx buffer completion */
+
+static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
+ struct xdp_frame_bulk *bq)
+{
+ if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
+ xdp_flush_frame_bulk(bq);
+
+ bq->q[bq->count++] = netmem;
+}
+
+/**
+ * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
+ * @sinfo: shared info corresponding to the buffer
+ * @bq: XDP frame bulk to store the buffer
+ * @frags: whether the buffer has frags
+ *
+ * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
+ * completion of ``XDP_TX`` buffers and allows freeing them in the same bulks
+ * with &xdp_frame buffers.
+ */
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags)
+{
+ if (!frags)
+ goto head;
+
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
+ bq);
+
+head:
+ libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
+
+/* Misc */
+
+/**
+ * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
+ * @count: number of descriptors in the queue
+ *
+ * The threshold is the limit at which RQs start to refill (when the number of
+ * empty buffers exceeds it) and SQs get cleaned up (when the number of free
+ * descriptors goes below it). To speed up hotpath processing, the threshold
+ * is always a power of two, closest to 1/4 of the queue length.
+ * Don't call it on the hotpath; calculate and cache the threshold during
+ * queue initialization.
+ *
+ * Return: the calculated threshold.
+ */
+u32 libeth_xdp_queue_threshold(u32 count)
+{
+ u32 quarter, low, high;
+
+ if (likely(is_power_of_2(count)))
+ return count >> 2;
+
+ quarter = DIV_ROUND_CLOSEST(count, 4);
+ low = rounddown_pow_of_two(quarter);
+ high = roundup_pow_of_two(quarter);
+
+ return high - quarter <= quarter - low ? high : low;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
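Worked example of the rounding above: for count = 1000, quarter = 250 and the neighbouring powers of two are 128 and 256; since 256 - 250 = 6 is smaller than 250 - 128 = 122, the threshold is 256. For a power-of-two count such as 512, the early return simply yields 512 >> 2 = 128.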
+
+/**
+ * __libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @xmo: XDP metadata ops (Rx hints)
+ * @zc_segs: maximum number of S/G frags the HW can transmit
+ * @tmo: XSk Tx metadata ops (Tx hints)
+ *
+ * Set all the features libeth_xdp supports. Only @dev is mandatory; if
+ * @zc_segs is zero, XSk support won't be advertised.
+ * Use the non-underscored versions in drivers instead.
+ */
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo)
+{
+ xdp_set_features_flag(dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG);
+ dev->xdp_metadata_ops = xmo;
+
+ tmo = tmo == libeth_xsktmo ? &libeth_xsktmo_slow : tmo;
+
+ dev->xdp_zc_max_segs = zc_segs ? : 1;
+ dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
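A usage sketch for a driver probe path, assuming a driver-defined metadata-ops structure and a 4-frag zero-copy limit (drivers are expected to go through the non-underscored wrappers noted above; the raw call is shown only to illustrate the arguments):

	/* advertise base XDP, redirect, ndo_xdp_xmit, S/G and XSk zero-copy;
	 * my_drv_xdp_metadata_ops and xdpsqs_available are driver-specific placeholders
	 */
	__libeth_xdp_set_features(netdev, &my_drv_xdp_metadata_ops, 4,
				  libeth_xsktmo);

	/* toggle the redirect-target bit as XDPSQs come and go */
	libeth_xdp_set_redirect(netdev, xdpsqs_available);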
+
+/**
+ * libeth_xdp_set_redirect - toggle the XDP redirect feature
+ * @dev: &net_device to configure
+ * @enable: whether XDP is enabled
+ *
+ * Use this when XDPSQs are not always available, to dynamically enable
+ * and disable the redirect feature.
+ */
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
+{
+ if (enable)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
+
+/* Module */
+
+static const struct libeth_xdp_ops xdp_ops __initconst = {
+ .bulk = libeth_xdp_return_buff_bulk,
+ .xsk = libeth_xsk_buff_free_slow,
+};
+
+static int __init libeth_xdp_module_init(void)
+{
+ libeth_attach_xdp(&xdp_ops);
+
+ return 0;
+}
+module_init(libeth_xdp_module_init);
+
+static void __exit libeth_xdp_module_exit(void)
+{
+ libeth_detach_xdp();
+}
+module_exit(libeth_xdp_module_exit);
+
+MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c
new file mode 100644
index 000000000000..846e902e31b6
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xsk.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xsk.h>
+
+#include "priv.h"
+
+/* ``XDP_TX`` bulking */
+
+void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count)
+{
+ for (u32 i = 0; i < count; i++)
+ libeth_xsk_buff_free_slow(bq[i].xsk);
+}
+
+/* XSk TMO */
+
+const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_buff_free_slow - free an XSk Rx buffer
+ * @xdp: buffer to free
+ *
+ * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
+ * to avoid unwanted inlining.
+ */
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
+{
+ xsk_buff_free(&xdp->base);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
+ * @head: head buffer
+ * @xdp: frag buffer
+ *
+ * External helper used by libeth_xsk_process_buff(), do not call directly.
+ * Frees both main and frag buffers on error.
+ *
+ * Return: main buffer with attached frag on success, %NULL on error (no space
+ * for a new frag).
+ */
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp)
+{
+ if (!xsk_buff_add_frag(&head->base, &xdp->base))
+ goto free;
+
+ return head;
+
+free:
+ libeth_xsk_buff_free_slow(xdp);
+ libeth_xsk_buff_free_slow(head);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
+
+/**
+ * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
+ * @rs: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * External helper used by __libeth_xsk_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp)
+{
+ libeth_xdp_buff_stats_frags(rs, xdp);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);
+
+/**
+ * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
+ * @xdp: buffer to process
+ * @bq: Tx bulk for queueing on ``XDP_TX``
+ * @act: verdict to process
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * External helper used by __libeth_xsk_run_prog(), do not call directly.
+ * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
+ * it is processed inline. The rest goes here for out-of-line processing,
+ * together with redirect errors.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret)
+{
+ switch (act) {
+ case XDP_DROP:
+ xsk_buff_free(&xdp->base);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ default:
+ break;
+ }
+
+ return libeth_xdp_prog_exception(bq, xdp, act, ret);
+}
+EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
+
+/**
+ * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
+ * @xdp: buffer to process
+ * @act: verdict returned by the prog
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * Internal. Frees the buffer and, if the queue uses XSk wakeups, stops the
+ * current NAPI poll when there are no free buffers left.
+ *
+ * Return: libeth_xdp's XDP prog verdict.
+ */
+u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ const struct xdp_buff_xsk *xsk;
+ u32 __ret = LIBETH_XDP_DROP;
+
+ if (act != XDP_REDIRECT)
+ goto drop;
+
+ xsk = container_of(&xdp->base, typeof(*xsk), xdp);
+ if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
+ __ret = LIBETH_XDP_ABORTED;
+
+drop:
+ libeth_xsk_buff_free_slow(xdp);
+
+ return __ret;
+}
+
+/* Refill */
+
+/**
+ * libeth_xskfq_create - create an XSkFQ
+ * @fq: fill queue to initialize
+ *
+ * Allocates the FQEs and initializes the fields used by libeth_xdp: number
+ * of buffers to refill, refill threshold and buffer len.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xskfq_create(struct libeth_xskfq *fq)
+{
+ fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
+ fq->nid);
+ if (!fq->fqes)
+ return -ENOMEM;
+
+ fq->pending = fq->count;
+ fq->thresh = libeth_xdp_queue_threshold(fq->count);
+ fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_create);
+
+/**
+ * libeth_xskfq_destroy - destroy an XSkFQ
+ * @fq: fill queue to destroy
+ *
+ * Zeroes the used fields and frees the FQEs array.
+ */
+void libeth_xskfq_destroy(struct libeth_xskfq *fq)
+{
+ fq->buf_len = 0;
+ fq->thresh = 0;
+ fq->pending = 0;
+
+ kvfree(fq->fqes);
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);
+
+/* .ndo_xsk_wakeup */
+
+static void libeth_xsk_napi_sched(void *info)
+{
+ __napi_schedule_irqoff(info);
+}
+
+/**
+ * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
+ * @csd: struct to initialize
+ * @napi: NAPI corresponding to this queue
+ *
+ * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
+ * to do that, the corresponding CSDs must be initialized when creating the
+ * queues.
+ */
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
+{
+ INIT_CSD(csd, libeth_xsk_napi_sched, napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
+
+/**
+ * libeth_xsk_wakeup - perform an XSk wakeup
+ * @csd: CSD corresponding to the queue
+ * @qid: the stack queue index
+ *
+ * Try to mark the NAPI as missed first, so that it gets rescheduled.
+ * If it is not already scheduled, schedule it on the corresponding CPU using
+ * an IPI (or directly if we are already running on that CPU).
+ */
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
+{
+ struct napi_struct *napi = csd->info;
+
+ if (napi_if_scheduled_mark_missed(napi) ||
+ unlikely(!napi_schedule_prep(napi)))
+ return;
+
+ if (unlikely(qid >= nr_cpu_ids))
+ qid %= nr_cpu_ids;
+
+ if (qid != raw_smp_processor_id() && cpu_online(qid))
+ smp_call_function_single_async(qid, csd);
+ else
+ __napi_schedule(napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
+
+/* Pool setup */
+
+#define LIBETH_XSK_DMA_ATTR \
+ (DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)
+
+/**
+ * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
+ * @dev: target &net_device
+ * @qid: stack queue index to configure
+ * @enable: whether to enable or disable the pool
+ *
+ * Check that @qid is valid and then map or unmap the pool.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ if (enable)
+ return xsk_pool_dma_map(pool, dev->dev.parent,
+ LIBETH_XSK_DMA_ATTR);
+ else
+ xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
index 33aff6bc8f81..e6072758e3d8 100644
--- a/drivers/net/ethernet/intel/libie/Kconfig
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -8,3 +8,9 @@ config LIBIE
libie (Intel Ethernet library) is a common library built on top of
libeth and containing vendor-specific routines shared between several
Intel Ethernet drivers.
+
+config LIBIE_ADMINQ
+ tristate
+ help
+	  Helper functions used by Intel Ethernet drivers for the
+	  administration queue command interface (aka adminq).
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
index ffd27fab916a..e98f00b865d3 100644
--- a/drivers/net/ethernet/intel/libie/Makefile
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -4,3 +4,7 @@
obj-$(CONFIG_LIBIE) += libie.o
libie-y := rx.o
+
+obj-$(CONFIG_LIBIE_ADMINQ) += libie_adminq.o
+
+libie_adminq-y := adminq.o
diff --git a/drivers/net/ethernet/intel/libie/adminq.c b/drivers/net/ethernet/intel/libie/adminq.c
new file mode 100644
index 000000000000..55356548e3f0
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/adminq.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <linux/module.h>
+#include <linux/net/intel/libie/adminq.h>
+
+static const char * const libie_aq_str_arr[] = {
+#define LIBIE_AQ_STR(x) \
+	[LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC_" #x
+ LIBIE_AQ_STR(OK),
+ LIBIE_AQ_STR(EPERM),
+ LIBIE_AQ_STR(ENOENT),
+ LIBIE_AQ_STR(ESRCH),
+ LIBIE_AQ_STR(EIO),
+ LIBIE_AQ_STR(EAGAIN),
+ LIBIE_AQ_STR(ENOMEM),
+ LIBIE_AQ_STR(EACCES),
+ LIBIE_AQ_STR(EBUSY),
+ LIBIE_AQ_STR(EEXIST),
+ LIBIE_AQ_STR(EINVAL),
+ LIBIE_AQ_STR(ENOSPC),
+ LIBIE_AQ_STR(ENOSYS),
+ LIBIE_AQ_STR(EMODE),
+ LIBIE_AQ_STR(ENOSEC),
+ LIBIE_AQ_STR(EBADSIG),
+ LIBIE_AQ_STR(ESVN),
+ LIBIE_AQ_STR(EBADMAN),
+ LIBIE_AQ_STR(EBADBUF),
+#undef LIBIE_AQ_STR
+ "LIBIE_AQ_RC_UNKNOWN",
+};
+
+#define __LIBIE_AQ_STR_NUM (ARRAY_SIZE(libie_aq_str_arr) - 1)
+
+/**
+ * libie_aq_str - get error string based on aq error
+ * @err: admin queue error type
+ *
+ * Return: error string for passed error code
+ */
+const char *libie_aq_str(enum libie_aq_err err)
+{
+ if (err >= ARRAY_SIZE(libie_aq_str_arr) ||
+ !libie_aq_str_arr[err])
+ err = __LIBIE_AQ_STR_NUM;
+
+ return libie_aq_str_arr[err];
+}
+EXPORT_SYMBOL_NS_GPL(libie_aq_str, "LIBIE_ADMINQ");
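Typical call-site pattern in a consumer driver (hypothetical snippet, not part of this patch; err holds the enum libie_aq_err returned with a failed command):

	if (err != LIBIE_AQ_RC_OK)
		dev_err(dev, "admin queue command failed: %s\n",
			libie_aq_str(err));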
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library - adminq helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index 66a9825fe11f..6fda656afa9c 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE"
+
+#include <linux/export.h>
#include <linux/net/intel/libie/rx.h>
/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
@@ -116,7 +119,7 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
LIBIE_RX_PT_IP(4),
LIBIE_RX_PT_IP(6),
};
-EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, "LIBIE");
+EXPORT_SYMBOL_GPL(libie_rx_pt_lut);
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
MODULE_IMPORT_NS("LIBETH");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 147571fdada3..feab392ab2ee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5014,8 +5014,6 @@ static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXRINGS:
info->data = rxq_number;
return 0;
- case ETHTOOL_GRXFH:
- return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index e47783ce77e0..57ac039df6f7 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -115,7 +115,7 @@ struct mvneta_bm_pool {
/* Packet size */
int pkt_size;
- /* Size of the buffer acces through DMA*/
+ /* Size of the buffer access through DMA */
u32 buf_size;
/* BPPE virtual base address */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8ed83fb98862..44b201817d94 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1618,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
return 0;
}
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info)
{
u16 hash_opts = 0;
u32 flow_type;
@@ -1656,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info)
{
unsigned long hash_opts;
u32 flow_type;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 85c9c6e80678..caadf3aea95d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
u32 *indir);
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info);
void mvpp2_cls_init(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index a7872d14a49d..8ebb985d2573 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5588,9 +5588,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_GRXFH:
- ret = mvpp2_ethtool_rxfh_get(port, info);
- break;
case ETHTOOL_GRXRINGS:
info->data = port->nrxqs;
break;
@@ -5628,9 +5625,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- ret = mvpp2_ethtool_rxfh_set(port, info);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = mvpp2_ethtool_cls_rule_ins(port, info);
break;
@@ -5747,6 +5741,29 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
}
+static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ return mvpp2_ethtool_rxfh_get(port, info);
+}
+
+static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ return mvpp2_ethtool_rxfh_set(port, info);
+}
+
static int mvpp2_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
@@ -5813,6 +5830,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields,
.create_rxfh_context = mvpp2_create_rxfh_context,
.modify_rxfh_context = mvpp2_modify_rxfh_context,
.remove_rxfh_context = mvpp2_remove_rxfh_context,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index ccea37847df8..532813d8d028 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -12,4 +12,4 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \
- rvu_rep.o
+ rvu_rep.o cn20k/mbox_init.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 971993586fb4..4ff19a04b23e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1182,17 +1182,25 @@ static int cgx_link_usertable_index_map(int speed)
static void set_mod_args(struct cgx_set_link_mode_args *args,
u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
- /* Fill default values incase of user did not pass
- * valid parameters
+ int mode_baseidx;
+ u8 cgx_mode;
+
+ if (args->multimode) {
+ args->mode |= mode;
+ return;
+ }
+
+	/* Derive the mode_baseidx and mode fields based
+	 * on the cgx_mode value
+	 */
- if (args->duplex == DUPLEX_UNKNOWN)
- args->duplex = duplex;
- if (args->speed == SPEED_UNKNOWN)
- args->speed = speed;
- if (args->an == AUTONEG_UNKNOWN)
- args->an = autoneg;
+ cgx_mode = find_first_bit((unsigned long *)&mode,
+ CGX_MODE_MAX);
args->mode = mode;
- args->ports = 0;
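+	/* Modes 42 and above are reported through mode group 1
+	 * (mode_baseidx = 1), with the mode bit re-based so it fits in
+	 * the FLAGS field.
+	 */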
+ mode_baseidx = cgx_mode - 41;
+ if (mode_baseidx > 0) {
+ args->mode_baseidx = 1;
+ args->mode = BIT_ULL(mode_baseidx);
+ }
}
static void otx2_map_ethtool_link_modes(u64 bitmask,
@@ -1200,16 +1208,16 @@ static void otx2_map_ethtool_link_modes(u64 bitmask,
{
switch (bitmask) {
case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
- set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
break;
case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
- set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
break;
case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
- set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
break;
case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
- set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
break;
case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
@@ -1481,25 +1489,36 @@ int cgx_get_fwdata_base(u64 *base)
}
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ struct cgx_lmac_fwdata_s *linkmodes,
int cgx_id, int lmac_id)
{
struct cgx *cgx = cgxd;
u64 req = 0, resp;
+ u8 bit;
if (!cgx)
return -ENODEV;
- if (args.mode)
- otx2_map_ethtool_link_modes(args.mode, &args);
- if (!args.speed && args.duplex && !args.an)
- return -EINVAL;
+ for_each_set_bit(bit, args.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)
+ otx2_map_ethtool_link_modes(bit, &args);
+
+ if (args.multimode) {
+ if (linkmodes->advertised_link_modes_own != CGX_CMD_OWN_NS)
+ return -EBUSY;
+
+ linkmodes->advertised_link_modes = args.mode;
+ /* Update ownership */
+ linkmodes->advertised_link_modes_own = CGX_CMD_OWN_FIRMWARE;
+ args.mode = GENMASK_ULL(41, 0);
+ }
req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
req = FIELD_SET(CMDMODECHANGE_SPEED,
cgx_link_usertable_index_map(args.speed), req);
req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
- req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req);
req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
@@ -1685,9 +1704,11 @@ unsigned long cgx_get_lmac_bmap(void *cgxd)
static int cgx_lmac_init(struct cgx *cgx)
{
+ u8 max_dmac_filters;
struct lmac *lmac;
+ int err, filter;
+ unsigned int i;
u64 lmac_list;
- int i, err;
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
@@ -1713,7 +1734,7 @@ static int cgx_lmac_init(struct cgx *cgx)
err = -ENOMEM;
goto err_lmac_free;
}
- sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ sprintf(lmac->name, "cgx_fwi_%u_%u", cgx->cgx_id, i);
if (cgx->mac_ops->non_contiguous_serdes_lane) {
lmac->lmac_id = __ffs64(lmac_list);
lmac_list &= ~BIT_ULL(lmac->lmac_id);
@@ -1726,6 +1747,8 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->mac_ops->dmac_filter_count /
cgx->lmac_count;
+ max_dmac_filters = lmac->mac_to_index_bmap.max;
+
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
@@ -1755,6 +1778,15 @@ static int cgx_lmac_init(struct cgx *cgx)
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+
+ /* Disable stale DMAC filters for sane state */
+ for (filter = 0; filter < max_dmac_filters; filter++)
+ cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter);
+
+	/* cgx_lmac_addr_del() does not clear the entry at index 0,
+	 * so it needs to be done explicitly
+	 */
+ cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id);
}
/* Start X2P reset on given MAC block */
@@ -1932,6 +1964,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
/* MAP configuration registers */
cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
if (!cgx->reg_base) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 1cf12e5c7da8..950231e7ea71 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -171,6 +171,7 @@ int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ struct cgx_lmac_fwdata_s *linkmodes,
int cgx_id, int lmac_id);
u64 cgx_features_get(void *cgxd);
struct mac_ops *get_mac_ops(void *cgxd);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index d4a27c882a5b..39352d451cc3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -95,7 +95,31 @@ enum CGX_MODE_ {
CGX_MODE_100G_C2M,
CGX_MODE_100G_CR4,
CGX_MODE_100G_KR4,
- CGX_MODE_MAX /* = 29 */
+ CGX_MODE_LAUI_2_C2C_BIT,
+ CGX_MODE_LAUI_2_C2M_BIT,
+ CGX_MODE_50GBASE_CR2_C_BIT,
+ CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */
+ CGX_MODE_100GAUI_2_C2C_BIT,
+ CGX_MODE_100GAUI_2_C2M_BIT,
+ CGX_MODE_100GBASE_CR2_BIT,
+ CGX_MODE_100GBASE_KR2_BIT,
+ CGX_MODE_SFI_1G_BIT,
+ CGX_MODE_25GBASE_CR_C_BIT,
+ CGX_MODE_25GBASE_KR_C_BIT,
+ CGX_MODE_SGMII_10M_BIT,
+ CGX_MODE_SGMII_100M_BIT, /* = 39 */
+ CGX_MODE_2500_BASEX_BIT = 42, /* Mode group 1 */
+ CGX_MODE_5000_BASEX_BIT,
+ CGX_MODE_O_USGMII_BIT,
+ CGX_MODE_Q_USGMII_BIT,
+ CGX_MODE_2_5G_USXGMII_BIT,
+ CGX_MODE_5G_USXGMII_BIT,
+ CGX_MODE_10G_SXGMII_BIT,
+ CGX_MODE_10G_DXGMII_BIT,
+ CGX_MODE_10G_QXGMII_BIT,
+ CGX_MODE_TP_BIT,
+ CGX_MODE_FIBER_BIT,
+ CGX_MODE_MAX /* = 53 */
};
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
@@ -258,7 +282,12 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
-#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+/* This field categorizes the mode ID (FLAGS) range to accommodate
+ * more modes.
+ * To specify the mode ID range of 0 - 41, this field will be 0.
+ * To specify the mode ID range of 42 - 83, this field will be 1.
+ */
+#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
/* LINK_BRING_UP command timeout */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
new file mode 100644
index 000000000000..4285b5d6a6a2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_API_H
+#define CN20K_API_H
+
+#include "../rvu.h"
+
+struct ng_rvu {
+ struct mbox_ops *rvu_mbox_ops;
+ struct qmem *pf_mbox_addr;
+ struct qmem *vf_mbox_addr;
+};
+
+/* Mbox related APIs */
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num);
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap);
+void cn20k_free_mbox_memory(struct rvu *rvu);
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu);
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start);
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu);
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs);
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs);
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs);
+#endif /* CN20K_API_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
new file mode 100644
index 000000000000..bd3aab7770dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "rvu_trace.h"
+#include "mbox.h"
+#include "reg.h"
+#include "api.h"
+
+static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = rvupf_read64(rvu, rvu_irq_data->intr_status);
+ rvupf_write64(rvu, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+ rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, offset, vec = 0;
+ int err;
+
+ /* irq data for 4 VFPF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
+ intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ }
+ irq_data[vec].afvf_queue_work_hdlr =
+ rvu_queue_work;
+ offset = pf_vec_start + intr_vec;
+ irq_data[vec].vec_num = offset;
+ irq_data[vec].rvu = rvu;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ err = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ &irq_data[vec]);
+ if (err) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for AFVF mbox irq\n");
+ return err;
+ }
+ rvu->irq_allocated[offset] = true;
+ }
+
+ return 0;
+}
+
+/* CN20K mbox PFx => AF irq handler */
+static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Clear interrupts */
+ intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status);
+ rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info,
+ rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+}
+
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+}
+
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <=
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++,
+ vec++) {
+ switch (intr_vec) {
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].rvu = rvu;
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[intr_vec * NAME_SIZE],
+ "RVUAF PFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[intr_vec * NAME_SIZE],
+ &irq_data[vec]);
+ if (ret)
+ return ret;
+
+ rvu->irq_allocated[intr_vec] = true;
+ }
+
+ return 0;
+}
+
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap)
+{
+ int region;
+ u64 bar;
+
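+	/* On cn20k the mbox memory is allocated by the AF itself (see
+	 * rvu_alloc_mbox_memory()), so derive each region's virtual
+	 * address from the qmem base instead of ioremapping BAR space.
+	 */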
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
+ int ndevs, int mbox_size)
+{
+ struct qmem *mbox_addr;
+ dma_addr_t iova;
+ int pf, err;
+
+	/* Allocate contiguous memory for mailbox communication,
+	 * e.g. AF <=> PFx mbox memory.
+	 * This allocated memory is split into chunks of MBOX_SIZE
+	 * and set up for each of the RVU PFs. In HW this memory will
+	 * get aliased to an offset within BAR2 of those PFs.
+	 *
+	 * AF will access mbox memory using direct physical addresses
+	 * and PFs will access the same shared memory from BAR2.
+	 *
+	 * PF <=> VF mbox memory also works in the same fashion.
+	 * The AFPF and PFVF mailboxes require the IOVA to be used to
+	 * maintain the mailbox msgs.
+	 */
+
+ err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size);
+ if (err)
+ return -ENOMEM;
+
+ switch (type) {
+ case TYPE_AFPF:
+ rvu->ng_rvu->pf_mbox_addr = mbox_addr;
+ iova = (u64)mbox_addr->iova;
+ for (pf = 0; pf < ndevs; pf++) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf),
+ (u64)iova);
+ iova += mbox_size;
+ }
+ break;
+ case TYPE_AFVF:
+ rvu->ng_rvu->vf_mbox_addr = mbox_addr;
+ rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct mbox_ops cn20k_mbox_ops = {
+ .pf_intr_handler = cn20k_mbox_pf_common_intr_handler,
+ .afvf_intr_handler = cn20k_afvf_mbox_intr_handler,
+};
+
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
+{
+ int dev;
+
+ if (!is_cn20k(rvu->pdev))
+ return 0;
+
+ rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops;
+
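+	/* Advertise the per-device mailbox size (as log2 of MBOX_SIZE)
+	 * to the hardware via the PF/VF mbox CFG registers.
+	 */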
+ if (type == TYPE_AFVF) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE));
+ } else {
+ for (dev = 0; dev < ndevs; dev++)
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
+ }
+
+ return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE);
+}
+
+void cn20k_free_mbox_memory(struct rvu *rvu)
+{
+ if (!is_cn20k(rvu->pdev))
+ return;
+
+ qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr);
+ qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr);
+}
+
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf)
+{
+ int qints, hwctx_size, err;
+ u64 cfg, ctx_cfg;
+
+ if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))
+ return 0;
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
new file mode 100644
index 000000000000..affb39803120
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef RVU_MBOX_REG_H
+#define RVU_MBOX_REG_H
+#include "../rvu.h"
+#include "../rvu_reg.h"
+
+/* RVUM block registers */
+#define RVU_PF_DISC (0x0)
+#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16)
+#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12)
+
+/* Mbox Registers */
+/* RVU AF BAR0 Mbox registers for AF => PFx */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4)
+#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3)
+#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6)
+
+/* RVU PF => AF mbox registers */
+#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3)
+#define RVU_MBOX_PF_INT (0xC20)
+#define RVU_MBOX_PF_INT_W1S (0xC28)
+#define RVU_MBOX_PF_INT_ENA_W1S (0xC30)
+#define RVU_MBOX_PF_INT_ENA_W1C (0xC38)
+
+#define RVU_AF_BAR2_SEL (0x9000000)
+#define RVU_AF_BAR2_PFID (0x16400)
+#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12)
+#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12)
+
+#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_IN_ENA_W1S(a) (0x3030 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_IN_ENA_W1C(a) (0x3038 | (a) << 6)
+
+#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3)
+#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4)
+#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4)
+
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
+#define RVU_MBOX_PF_VF_ADDR (0xC40)
+#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VF_CFG (0xC60)
+
+#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3)
+#define RVU_MBOX_VF_INT (0x20)
+#define RVU_MBOX_VF_INT_W1S (0x28)
+#define RVU_MBOX_VF_INT_ENA_W1S (0x30)
+#define RVU_MBOX_VF_INT_ENA_W1C (0x38)
+
+#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3)
+#endif /* RVU_MBOX_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
new file mode 100644
index 000000000000..76ce3ec6da9c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef STRUCT_H
+#define STRUCT_H
+
+/*
+ * CN20k RVU PF MBOX Interrupt Vector Enumeration
+ *
+ * Vectors 0 - 3 are compatible with pre-cn20k silicon, hence the
+ * existing macros are reused.
+ */
+enum rvu_mbox_pf_int_vec_e {
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8,
+ RVU_MBOX_PF_INT_VEC_CNT = 0x9,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_cn20k_int_vec_e {
+ RVU_AF_CN20K_INT_VEC_POISON = 0x0,
+ RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1,
+ RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2,
+ RVU_AF_CN20K_INT_VEC_PFME0 = 0x3,
+ RVU_AF_CN20K_INT_VEC_PFME1 = 0x4,
+ RVU_AF_CN20K_INT_VEC_GEN = 0x5,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9,
+ RVU_AF_CN20K_INT_VEC_CNT = 0xa,
+};
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 406c59100a35..8a08bebf08c2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -39,7 +39,7 @@ struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
- u16 entry_sz;
+ u32 entry_sz;
u8 align;
u32 qsize;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 7d21905deed8..75872d257eca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -10,8 +10,11 @@
#include <linux/pci.h>
#include "rvu_reg.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#include "mbox.h"
#include "rvu_trace.h"
+#include "rvu.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
tx_hdr->msg_size = 0;
+ tx_hdr->sig = 0;
rx_hdr->num_msgs = 0;
rx_hdr->msg_size = 0;
+ rx_hdr->sig = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);
@@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_VFPF:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+ mbox->reg_base = reg_base;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+
+ return 0;
+}
+
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
void *reg_base, int direction, int ndevs)
{
+ if (is_cn20k(pdev))
+ return cn20k_mbox_setup(mbox, pdev, reg_base,
+ direction, ndevs);
+
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
spin_lock(&mdev->mbox_lock);
- tx_hdr->msg_size = mdev->msg_size;
+ if (!tx_hdr->sig) {
+ tx_hdr->msg_size = mdev->msg_size;
+ tx_hdr->num_msgs = mdev->num_msgs;
+ }
/* Reset header for next messages */
mdev->msg_size = 0;
@@ -248,7 +345,6 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
* messages. So this should be written after writing all the messages
* to the shared memory.
*/
- tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
@@ -309,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr = NULL;
+ struct mbox_hdr *mboxhdr = NULL;
spin_lock(&mdev->mbox_lock);
size = ALIGN(size, MBOX_MSG_ALIGN);
@@ -332,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
mdev->msg_size += size;
mdev->rsp_size += size_rsp;
msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+
+ mboxhdr = mdev->mbase + mbox->tx_start;
+ /* Clear the msg header region */
+ memset(mboxhdr, 0, msgs_offset);
+
exit:
spin_unlock(&mdev->mbox_lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a213b2663583..933073cd2280 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -10,9 +10,11 @@
#include <linux/etherdevice.h>
#include <linux/sizes.h>
+#include <linux/ethtool.h>
#include "rvu_struct.h"
#include "common.h"
+#include "cn20k/struct.h"
#define MBOX_SIZE SZ_64K
@@ -50,6 +52,11 @@
#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
+
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
void *hwbase;
@@ -78,6 +85,8 @@ struct otx2_mbox {
struct mbox_hdr {
u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
+ u16 opt_msg;
+ u8 sig;
};
/* Header which precedes every msg and is also part of it */
@@ -650,11 +659,17 @@ struct cgx_lmac_fwdata_s {
u64 supported_link_modes;
/* only applicable if AN is supported */
u64 advertised_fec;
- u64 advertised_link_modes;
+ u64 advertised_link_modes_own:1; /* CGX_CMD_OWN */
+ u64 advertised_link_modes:63;
/* Only applicable if SFP/QSFP slot is present */
struct sfp_eeprom_s sfp_eeprom;
struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+ u32 lmac_type;
+ u32 portm_idx;
+ u64 mgmt_port:1;
+ u64 advertised_an:1;
+ u64 port;
+#define LMAC_FWDATA_RESERVED_MEM 1018
u64 reserved[LMAC_FWDATA_RESERVED_MEM];
};
@@ -667,12 +682,13 @@ struct cgx_set_link_mode_args {
u32 speed;
u8 duplex;
u8 an;
- u8 ports;
+ u8 mode_baseidx;
+ u8 multimode;
u64 mode;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};
struct cgx_set_link_mode_req {
-#define AUTONEG_UNKNOWN 0xff
struct mbox_msghdr hdr;
struct cgx_set_link_mode_args args;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index 0277d226293e..d7030dfa5dad 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -97,7 +97,7 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
event->intr_mask &= pfvf->intr_mask;
@@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
struct mcs_intr_info *req;
int pf;
- pf = rvu_get_pf(event->pcifunc);
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
mutex_lock(&rvu->mbox_lock);
@@ -193,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
mcs->pf_map[0] = pcifunc;
pfvf->intr_mask = req->intr_mask;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index a8025f0486c9..c6bb3aaa8e0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -20,6 +20,8 @@
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *));
-enum {
- TYPE_AFVF,
- TYPE_AFPF,
-};
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq);
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq);
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
@@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
block->fn_map[lf] = attach ? pcifunc : 0;
@@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
-inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
u64 cfg;
@@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
int pf, func;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
func = pcifunc & RVU_PFVF_FUNC_MASK;
/* Get first HWVF attached to this PF */
@@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
if (pcifunc & RVU_PFVF_FUNC_MASK)
return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
else
- return &rvu->pf[rvu_get_pf(pcifunc)];
+ return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
}
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
@@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
int pf, vf, nvfs;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (pf >= rvu->hw->total_pfs)
return false;
@@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
+
+ /* Free the QINT/CINT memory */
+ pfvf = &rvu->pf[RVU_AFPF];
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
@@ -1487,7 +1487,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
/* All CGX mapped PFs are set with assigned NIX block during init */
- if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
blkaddr = pf->nix_blkaddr;
} else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
@@ -1501,7 +1501,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
}
/* if SDP1 then the blkaddr is NIX1 */
- if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
switch (blkaddr) {
@@ -2006,7 +2006,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
vf = pcifunc & RVU_PFVF_FUNC_MASK;
cfg = rvu_read64(rvu, BLKADDR_RVUM,
- RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc)));
numvfs = (cfg >> 12) & 0xFF;
if (vf && vf <= numvfs)
@@ -2223,15 +2223,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) {
+ req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs;
+ rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL,
+ RVU_AF_BAR2_PFID);
+ if (type == TYPE_AFPF)
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)),
+ 0x1);
+ else
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)),
+ 0x1);
+ usleep_range(5000, 6000);
+ goto done;
+ }
+
for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
switch (type) {
case TYPE_AFPF:
- msg->pcifunc &=
- ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev);
+ msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0);
break;
case TYPE_AFVF:
msg->pcifunc &=
@@ -2249,16 +2264,17 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, rvu_get_pf(msg->pcifunc),
+ msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+done:
mw->mbox_wrk[devid].num_msgs = 0;
- if (poll)
+ if (!is_cn20k(mbox->pdev) && poll)
otx2_mbox_wait_for_zero(mbox, devid);
/* Send mbox responses to VF/PF */
@@ -2364,13 +2380,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
-static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr,
int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
u64 bar4;
+	/* For the cn20k platform the AF mailbox region is allocated by software
+	 * and the corresponding IOVA is programmed into hardware, unlike earlier
+	 * silicons where software uses the hardware region after ioremap.
+ */
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr,
+ num, type, pf_bmap);
+
/* For cn10k platform VF mailbox regions of a PF follows after the
* PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
* RVU_PF_VF_BAR4_ADDR register.
@@ -2389,7 +2413,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2412,7 +2436,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
RVU_AF_PF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2420,20 +2444,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
error:
while (region--)
- iounmap((void __iomem *)mbox_addr[region]);
+ iounmap(mbox_addr[region]);
return -ENOMEM;
}
+static struct mbox_ops rvu_mbox_ops = {
+ .pf_intr_handler = rvu_mbox_pf_intr_handler,
+ .afvf_intr_handler = rvu_mbox_intr_handler,
+};
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- int err = -EINVAL, i, dir, dir_up;
+ void __iomem **mbox_regions;
+ struct ng_rvu *ng_rvu_mbox;
+ int err, i, dir, dir_up;
void __iomem *reg_base;
struct rvu_work *mwork;
unsigned long *pf_bmap;
- void **mbox_regions;
const char *name;
u64 cfg;
@@ -2441,6 +2471,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
if (!pf_bmap)
return -ENOMEM;
+ ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL);
+ if (!ng_rvu_mbox) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
/* RVU VFs */
if (type == TYPE_AFVF)
bitmap_set(pf_bmap, 0, num);
@@ -2454,12 +2490,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ rvu->ng_rvu = ng_rvu_mbox;
+
+ rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops;
+
+ err = cn20k_rvu_mbox_init(rvu, type, num);
+ if (err)
+ goto free_mem;
+
mutex_init(&rvu->mbox_lock);
- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
- goto free_bitmap;
+ goto free_qmem;
}
switch (type) {
@@ -2482,11 +2526,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
+ err = -EINVAL;
goto free_regions;
}
mw->mbox_wq = alloc_workqueue("%s",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ WQ_HIGHPRI | WQ_MEM_RECLAIM,
num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
@@ -2529,7 +2574,11 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
- goto free_regions;
+
+ kfree(mbox_regions);
+ bitmap_free(pf_bmap);
+
+ return 0;
exit:
destroy_workqueue(mw->mbox_wq);
@@ -2538,6 +2587,10 @@ unmap_regions:
iounmap((void __iomem *)mbox_regions[num]);
free_regions:
kfree(mbox_regions);
+free_qmem:
+ cn20k_free_mbox_memory(rvu);
+free_mem:
+ kfree(rvu->ng_rvu);
free_bitmap:
bitmap_free(pf_bmap);
return err;
@@ -2564,8 +2617,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
otx2_mbox_destroy(&mw->mbox_up);
}
-static void rvu_queue_work(struct mbox_wq_info *mw, int first,
- int mdevs, u64 intr)
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -2656,6 +2709,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
+ if (is_cn20k(rvu->pdev)) {
+ cn20k_rvu_enable_mbox_intr(rvu);
+ return;
+ }
+
/* Clear spurious irqs, if any */
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
@@ -2773,7 +2831,7 @@ static void rvu_flr_handler(struct work_struct *work)
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
for (vf = 0; vf < numvfs; vf++)
__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
@@ -2909,9 +2967,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_cpt_unregister_interrupts(rvu);
- /* Disable the Mbox interrupt */
- rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
- INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ if (!is_cn20k(rvu->pdev))
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ else
+ cn20k_rvu_unregister_interrupts(rvu);
/* Disable the PF FLR interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
@@ -2944,6 +3005,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
* VF interrupts can be handled. Offset equal to zero means
* that PF vectors are not configured and overlapping AF vectors.
*/
+ if (is_cn20k(rvu->pdev))
+ return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT +
+ RVU_MBOX_PF_INT_VEC_CNT) && offset;
+
return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
offset;
}
@@ -2974,18 +3039,30 @@ static int rvu_register_interrupts(struct rvu *rvu)
return ret;
}
- /* Register mailbox interrupt handler */
- sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
- ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_pf_intr_handler, 0,
- &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
- if (ret) {
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for mbox irq\n");
- goto fail;
- }
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
+ "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector
+ (rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX *
+ NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
- rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ } else {
+ ret = cn20k_register_afpf_mbox_intr(rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
+ }
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
@@ -3040,34 +3117,40 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Get PF MSIX vectors offset. */
pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
- /* Register MBOX0 interrupt. */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox0\n");
-
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
- /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
- * simply increment current offset by 1.
- */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox1\n");
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
+ } else {
+ ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox\n");
+ }
/* Register FLR interrupt handler for AF's VFs */
offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
@@ -3178,6 +3261,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_disable_afvf_intr(rvu, vfs);
+
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
@@ -3194,6 +3280,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_enable_afvf_intr(rvu, vfs);
+
/* Clear any pending interrupts and enable AF VF interrupts for
* the first 64 VFs.
*/
@@ -3438,6 +3527,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
rvu->fwdata->ptp_ext_tstamp);
+ /* Alloc CINT and QINT memory */
+ rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0,
+ (rvu->hw->block[BLKADDR_NIX0].lf.max));
return 0;
err_dl:
rvu_unregister_dl(rvu);
@@ -3489,6 +3581,9 @@ static void rvu_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
devm_kfree(&pdev->dev, rvu->hw);
+ if (is_cn20k(rvu->pdev))
+ cn20k_free_mbox_memory(rvu);
+ kfree(rvu->ng_rvu);
devm_kfree(&pdev->dev, rvu);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 48f66292ad5c..7ee1fdeb5295 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <net/devlink.h>
+#include <linux/soc/marvell/silicons.h>
#include "rvu_struct.h"
#include "rvu_devlink.h"
@@ -43,12 +44,39 @@
#define MAX_CPT_BLKS 2
/* PF_FUNC */
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_OTX2_PFVF_PF_SHIFT 10
+#define RVU_OTX2_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_CN20K_PFVF_PF_SHIFT 9
+#define RVU_CN20K_PFVF_PF_MASK 0x7F
+
+static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func)
+{
+ if (is_cn20k(pdev))
+ return ((pf & RVU_CN20K_PFVF_PF_MASK) <<
+ RVU_CN20K_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+ else
+ return ((pf & RVU_OTX2_PFVF_PF_MASK) <<
+ RVU_OTX2_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+}
+
+static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev)
+{
+ if (is_cn20k(pdev))
+ return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT);
+ else
+ return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT);
+}
+
+#define RVU_AFPF 25
#ifdef CONFIG_DEBUG_FS
+
struct dump_ctx {
int lf;
int id;
@@ -446,6 +474,23 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct rvu_irq_data {
+ u64 intr_status;
+ void (*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ struct rvu *rvu;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
+struct mbox_ops {
+ irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq);
+ irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq);
+};
+
struct channel_fwdata {
struct sdp_node_info info;
u8 valid;
@@ -611,6 +656,8 @@ struct rvu {
struct list_head rep_evtq_head;
/* Representor event lock */
spinlock_t rep_evtq_lock;
+
+ struct ng_rvu *ng_rvu;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -836,7 +883,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
-int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
@@ -865,8 +911,8 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
-bool is_sdp_pfvf(u16 pcifunc);
-bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc);
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc);
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
@@ -877,11 +923,21 @@ static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
return false;
}
+static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc)
+{
+ if (is_cn20k(pdev))
+ return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) &
+ RVU_CN20K_PFVF_PF_MASK;
+ else
+ return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) &
+ RVU_OTX2_PFVF_PF_MASK;
+}
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
- !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
+ !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0));
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -893,7 +949,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
{
return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)));
}
#define M(_name, _id, fn_name, req, rsp) \
@@ -901,6 +957,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M
+/* Mbox APIs */
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
@@ -955,7 +1015,8 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
-
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
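
The rvu.h hunk above replaces the single fixed PF_FUNC layout with per-silicon helpers: OTX2-class parts keep the PF field at bits [15:10] (6-bit mask 0x3F), CN20K moves it to bit 9 with a 7-bit mask (0x7F), and callers now go through rvu_make_pcifunc()/rvu_get_pf() instead of open-coded shifts. The standalone userspace sketch below mirrors those helpers only to show the resulting encodings; the is_cn20k flag stands in for the real is_cn20k(pdev) check and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define RVU_OTX2_PFVF_PF_SHIFT   10
#define RVU_OTX2_PFVF_PF_MASK    0x3F
#define RVU_CN20K_PFVF_PF_SHIFT  9
#define RVU_CN20K_PFVF_PF_MASK   0x7F
#define RVU_PFVF_FUNC_SHIFT      0
#define RVU_PFVF_FUNC_MASK       0x3FF

/* Same logic as rvu_make_pcifunc(), with a plain flag instead of a pci_dev */
static uint16_t make_pcifunc(int is_cn20k, int pf, int func)
{
	if (is_cn20k)
		return ((pf & RVU_CN20K_PFVF_PF_MASK) << RVU_CN20K_PFVF_PF_SHIFT) |
		       ((func & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT);
	return ((pf & RVU_OTX2_PFVF_PF_MASK) << RVU_OTX2_PFVF_PF_SHIFT) |
	       ((func & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT);
}

/* Same logic as rvu_get_pf() */
static int get_pf(int is_cn20k, uint16_t pcifunc)
{
	if (is_cn20k)
		return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) & RVU_CN20K_PFVF_PF_MASK;
	return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) & RVU_OTX2_PFVF_PF_MASK;
}

int main(void)
{
	/* PF 3, VF index 2, so func = vf + 1 = 3 */
	uint16_t otx2  = make_pcifunc(0, 3, 3);
	uint16_t cn20k = make_pcifunc(1, 3, 3);

	printf("otx2:  pcifunc=0x%04x pf=%d\n", otx2, get_pf(0, otx2));   /* 0x0c03, 3 */
	printf("cn20k: pcifunc=0x%04x pf=%d\n", cn20k, get_pf(1, cn20k)); /* 0x0603, 3 */
	return 0;
}
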
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index d0331b0e0bfd..3303c475414a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -457,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu)
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return false;
return true;
}
@@ -484,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -501,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -526,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int i = 0, lmac_count = 0;
struct mac_ops *mac_ops;
u8 max_dmac_filters;
@@ -577,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
void *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
@@ -633,7 +633,7 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *parent_pf;
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
@@ -663,7 +663,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
@@ -681,17 +681,20 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
if (rvu_npc_exact_has_match_table(rvu))
return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
@@ -701,7 +704,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0;
@@ -725,7 +728,7 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -743,7 +746,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
struct cgx_max_dmac_entries_get_rsp
*rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* If msg is received from PFs(which are not mapped to CGX LMACs)
@@ -769,20 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
- u8 cgx_id, lmac_id;
- int rc = 0;
- u64 cfg;
-
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
- u64_to_ether_addr(cfg, rsp->mac_addr);
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
return 0;
}
@@ -790,7 +785,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -809,7 +804,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -828,7 +823,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -864,7 +859,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
return -EPERM;
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
@@ -878,7 +873,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
@@ -917,7 +912,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
int pf, err;
- pf = rvu_get_pf(req->hdr.pcifunc);
+ pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
@@ -933,7 +928,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
struct msg_req *req,
struct cgx_features_info_msg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_idx, lmac;
void *cgxd;
@@ -975,7 +970,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1005,7 +1000,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_pfc = 0, tx_pfc = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1046,7 +1041,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
int err = 0;
@@ -1073,7 +1068,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1106,7 +1101,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
/* Assumes LF of a PF and all of its VF belongs to the same
* NIX block
*/
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1133,10 +1128,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
struct rvu_pfvf *parent_pf, *pfvf;
int cgx_users, err = 0;
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return 0;
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
mutex_lock(&rvu->cgx_cfg_lock);
@@ -1179,7 +1174,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
struct fec_mode *req,
struct fec_mode *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1195,7 +1190,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
struct cgx_fw_data *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
@@ -1222,7 +1217,8 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
struct cgx_set_link_mode_req *req,
struct cgx_set_link_mode_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct cgx_lmac_fwdata_s *linkmodes;
u8 cgx_idx, lmac;
void *cgxd;
@@ -1231,14 +1227,20 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
- rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac];
+ else
+ linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac];
+
+ rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes,
+ cgx_idx, lmac);
return 0;
}
int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1256,7 +1258,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1272,7 +1274,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
u8 rx_pause, u16 pfc_en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_8023 = 0, tx_8023 = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1310,7 +1312,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
struct cgx_pfc_cfg *req,
struct cgx_pfc_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -1335,7 +1337,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
struct cgx *cgxd;
u8 cgx, lmac;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 4a3370a40dd8..d2163da28d18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -66,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
+ return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
@@ -83,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
@@ -155,7 +155,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
int err = 0;
u64 val;
- /* Check if PF_FUNC wants to use it's own local memory as LMTLINE
+ /* Check if PF_FUNC wants to use its own local memory as LMTLINE
* region, if so, convert that IOVA to physical address and
* populate LMT table with that address
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 3c5bbaf12e59..f404117bf6c8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -410,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (pcifunc & RVU_PFVF_FUNC_MASK)
return false;
@@ -422,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index c827da626471..8375f18c8e07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -688,7 +688,7 @@ static int get_max_column_width(struct rvu *rvu)
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -759,7 +759,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
off = 0;
flag = 0;
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -842,7 +842,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
cgx[0] = 0;
lmac[0] = 0;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (pfvf->nix_blkaddr == BLKADDR_NIX0)
@@ -867,6 +867,71 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
+{
+ struct rvu *rvu = s->private;
+ struct rvu_fwdata *fwdata;
+ u8 mac[ETH_ALEN];
+ int count = 0, i;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ fwdata = rvu->fwdata;
+ seq_puts(s, "\nRVU Firmware Data:\n");
+ seq_puts(s, "\n\t\tPTP INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\texternal clockrate \t :%x\n",
+ fwdata->ptp_ext_clk_rate);
+ seq_printf(s, "\t\texternal timestamp \t :%x\n",
+ fwdata->ptp_ext_tstamp);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
+ seq_puts(s, "\t\t=======================\n");
+ seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
+ seq_printf(s, "\t\tNode ID \t\t :%x\n",
+ fwdata->channel_data.info.node_id);
+ seq_printf(s, "\t\tNumber of VFs \t\t :%x\n",
+ fwdata->channel_data.info.max_vfs);
+ seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
+ fwdata->channel_data.info.num_pf_rings);
+ seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
+ fwdata->channel_data.info.pf_srn);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tPF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ for (i = 0; i < PF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->pf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ seq_puts(s, "\n\t\tVF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ count = 0;
+ for (i = 0; i < VF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->vf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
+
static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
@@ -2623,10 +2688,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
pcifunc = ipolicer->pfvf_map[idx];
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(m, "Allocated to :: PF %d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(m, "Allocated to :: PF %d VF %d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
print_band_prof_ctx(m, &aq_rsp.prof);
}
@@ -2923,6 +2988,97 @@ static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
+{
+ struct cgx_lmac_fwdata_s *fwdata;
+ void *cgxd = s->private;
+ struct phy_s *phy;
+ struct rvu *rvu;
+ int cgx_id, i;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ cgx_id = cgx_get_cgxid(cgxd);
+
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+ else
+ fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];
+
+ seq_puts(s, "\nFIRMWARE SHARED:\n");
+ seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ fwdata->supported_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ (u64)fwdata->advertised_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
+ seq_puts(s, "\t\t============\n");
+ seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid);
+ seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
+ seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
+ seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
+ seq_printf(s, "\t\t Link modes own \t :%llx\n",
+ (u64)fwdata->advertised_link_modes_own);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tEEPROM DATA\n");
+ seq_puts(s, "\t\t===========\n");
+ seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
+ seq_puts(s, "\t\t data \t\t\t :\n");
+ seq_puts(s, "\t\t");
+ for (i = 0; i < SFP_EEPROM_SIZE; i++) {
+ seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
+ if ((i + 1) % 16 == 0) {
+ seq_puts(s, "\n");
+ seq_puts(s, "\t\t");
+ }
+ }
+ seq_puts(s, "\n");
+
+ phy = &fwdata->phy;
+ seq_puts(s, "\n\t\tPHY INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
+ phy->misc.can_change_mod_type);
+ seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
+ seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
+ seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
+ phy->fec_stats.rsfec_corr_cws);
+ seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
+ phy->fec_stats.rsfec_uncorr_cws);
+ seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
+ phy->fec_stats.brfec_corr_blks);
+ seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
+ phy->fec_stats.brfec_uncorr_blks);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
+{
+ return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s));
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -2962,6 +3118,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file_aux_num("mac_filter", 0600,
rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
+ debugfs_create_file("fwdata", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_fwdata_fops);
}
}
}
@@ -2983,10 +3142,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
if (entry_acnt) {
@@ -3049,13 +3208,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_puts(filp, "\n\t\t Current allocation\n");
seq_puts(filp, "\t\t====================\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
}
}
@@ -3326,7 +3485,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
mutex_lock(&mcam->lock);
list_for_each_entry(iter, &mcam->mcam_rules, list) {
- pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, iter->owner);
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
if (iter->owner & RVU_PFVF_FUNC_MASK) {
@@ -3344,7 +3503,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
rvu_dbg_npc_mcam_show_flows(s, iter);
if (is_npc_intf_rx(iter->intf)) {
target = iter->rx_action.pf_func;
- pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, target);
seq_printf(s, "\tForward to: PF%d ", pf);
if (target & RVU_PFVF_FUNC_MASK) {
@@ -3808,6 +3967,9 @@ void rvu_dbg_init(struct rvu *rvu)
debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_lmtst_map_table_fops);
+ debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rvu_fwdata_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 613655fcd34f..60db1f616cc8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -315,7 +315,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
if ((nix_get_tx_link(rvu, map_func) !=
nix_get_tx_link(rvu, pcifunc)) &&
- (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)))
+ (rvu_get_pf(rvu->pdev, map_func) !=
+ rvu_get_pf(rvu->pdev, pcifunc)))
return false;
else
return true;
@@ -339,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
bool from_vf;
int err;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
@@ -416,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
@@ -590,12 +591,12 @@ static int nix_bp_disable(struct rvu *rvu,
u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
if (cpt_link && !rvu->hw->cpt_links)
@@ -736,9 +737,9 @@ static int nix_bp_enable(struct rvu *rvu,
u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
@@ -1674,7 +1675,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
}
intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
@@ -1798,7 +1799,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
if (rc < 0) {
dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return NIX_AF_ERR_MARK_CFG_FAIL;
}
@@ -2050,7 +2052,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
@@ -2068,7 +2070,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
int link, int *start, int *end)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
/* LBK links */
if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
@@ -2426,7 +2428,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
{
struct nix_smq_flush_ctx *smq_flush_ctx;
int err, restore_tx_en = 0, i;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
u16 tl2_tl3_link_schq;
u8 link, link_level;
@@ -2820,7 +2822,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
{
struct rvu_hwinfo *hw = rvu->hw;
int lbk_link_start, lbk_links;
- u8 pf = rvu_get_pf(pcifunc);
+ u8 pf = rvu_get_pf(rvu->pdev, pcifunc);
int schq;
u64 cfg;
@@ -3190,7 +3192,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return err;
}
return 0;
@@ -3458,7 +3461,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
- pcifunc >> RVU_PFVF_PF_SHIFT);
+ rvu_get_pf(rvu->pdev, pcifunc));
return -EINVAL;
}
@@ -3510,7 +3513,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
struct rvu_pfvf *pfvf;
if (!hw->cap.nix_rx_multicast ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev,
+ pcifunc & ~RVU_PFVF_FUNC_MASK))) {
*mce_list = NULL;
*mce_idx = 0;
return;
@@ -3544,13 +3548,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
return 0;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return 0;
@@ -3619,7 +3623,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pcifunc |= idx;
/* Add dummy entries now, so that we don't have to check
* for whether AQ_OP should be INIT/WRITE later on.
@@ -4554,7 +4558,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
static void nix_find_link_frs(struct rvu *rvu,
struct nix_frs_cfg *req, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct rvu_pfvf *pfvf;
int maxlen, minlen;
int numvfs, hwvf;
@@ -4601,7 +4605,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
@@ -5046,7 +5050,7 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
(ltdefs->rx_apad1.ltype_match << 4) |
ltdefs->rx_apad1.ltype_mask);
- /* Receive ethertype defination register defines layer
+ /* Receive ethertype definition register defines layer
* information in NPC_RESULT_S to identify the Ethertype
* location in L2 header. Used for Ethertype overwriting
* in inline IPsec flow.
@@ -5251,7 +5255,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_switch_update_rules(rvu, pcifunc, true);
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
@@ -5284,7 +5288,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_switch_update_rules(rvu, pcifunc, false);
rvu_cgx_tx_enable(rvu, pcifunc, true);
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
return 0;
@@ -5296,7 +5300,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
u64 sa_base;
@@ -5385,7 +5389,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int nixlf;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index da15bb451178..c7c70429eb6c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
- int pf = rvu_get_pf(pcifunc);
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int index;
/* Check if this is for a PF */
@@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
@@ -3434,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr, nixlf, rc, intf_mode;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u64 rxpkind, txpkind;
u8 cgx_id, lmac_id;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index d2661e7fabdb..999f6d93c7fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_
int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct npc_exact_table_entry *entry;
struct npc_exact_table *table;
struct rvu_pfvf *pfvf;
@@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
int rc = 0;
@@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
int rc;
rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
@@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu)
}
/* Filter rules are only for PF */
- pcifunc = RVU_PFFUNC(i, 0);
+ pcifunc = RVU_PFFUNC(rvu->pdev, i, 0);
dev_dbg(rvu->dev,
"%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
index 57a09328d46b..cb25cf478f1f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
#define NPC_MCAM_DROP_RULE_MAX 30
#define NPC_MCAM_SDP_DROP_RULE_IDX 0
-#define RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
enum npc_exact_opc_type {
NPC_EXACT_OPC_MEM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 32953cca108c..03099bc570bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -39,7 +39,7 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
struct rep_event *msg;
int pf;
- pf = rvu_get_pf(event->pcifunc);
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);
@@ -114,10 +114,10 @@ int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
struct rep_event *req;
int pf;
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return 0;
- pf = rvu_get_pf(rvu->rep_pcifunc);
+ pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc);
mutex_lock(&rvu->mbox_lock);
req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
@@ -325,7 +325,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu_get_nix_blkaddr(rvu, pcifunc);
rep = true;
for (i = 0; i < 2; i++) {
@@ -345,8 +345,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << RVU_PFVF_PF_SHIFT |
- ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1);
rvu_get_nix_blkaddr(rvu, pcifunc);
/* Skip installimg rules if nixlf is not attached */
/* Skip installing rules if nixlf is not attached */
@@ -454,7 +453,7 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu->rep2pfvf_map[rep] = pcifunc;
rsp->rep_pf_map[rep] = pcifunc;
rep++;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index 38cfe148f4b7..e4a5f9fa6fd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -17,9 +17,9 @@
/* SDP PF number */
static int sdp_pf_num[MAX_SDP] = {-1, -1};
-bool is_sdp_pfvf(u16 pcifunc)
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc)
{
- u16 pf = rvu_get_pf(pcifunc);
+ u16 pf = rvu_get_pf(rvu->pdev, pcifunc);
u32 found = 0, i = 0;
while (i < MAX_SDP) {
@@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc)
return true;
}
-bool is_sdp_pf(u16 pcifunc)
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc)
{
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
return (rvu->vf_devid == RVU_SDP_VF_DEVID);
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac94cb2ec4..0596a3ac4c12 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -33,7 +33,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
BLKADDR_APR = 0x16ULL,
- BLK_COUNT = 0x17ULL,
+ BLKADDR_MBOX = 0x1bULL,
+ BLK_COUNT = 0x1cULL,
};
/* RVU Block Type Enumeration */
@@ -49,7 +50,8 @@ enum rvu_block_type_e {
BLKTYPE_TIM = 0x8,
BLKTYPE_CPT = 0x9,
BLKTYPE_NDC = 0xa,
- BLKTYPE_MAX = 0xa,
+ BLKTYPE_MBOX = 0x13,
+ BLKTYPE_MAX = 0x13,
};
/* RVU Admin function Interrupt Vector Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 268efb7c1c15..49ce38685a7e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
/* rvu_get_nix_blkaddr sets up the corresponding NIX block
* address and NIX RX and TX interfaces for a pcifunc.
* Generally it is called during attach call of a pcifunc but it
@@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_get_nix_blkaddr(rvu, pcifunc);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
@@ -236,7 +236,7 @@ void rvu_switch_disable(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
@@ -248,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 69e0778f9ac1..883e9f4d601c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 7f6a435ac680..bec7d5b4d7cc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -14,6 +14,7 @@ static struct dev_hw_ops otx2_hw_ops = {
.sqe_flush = otx2_sqe_flush,
.aura_freeptr = otx2_aura_freeptr,
.refill_pool_ptrs = otx2_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
};
static struct dev_hw_ops cn10k_hw_ops = {
@@ -21,8 +22,20 @@ static struct dev_hw_ops cn10k_hw_ops = {
.sqe_flush = cn10k_sqe_flush,
.aura_freeptr = cn10k_aura_freeptr,
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
};
+void otx2_init_hw_ops(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+}
+EXPORT_SYMBOL(otx2_init_hw_ops);
+
int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
@@ -30,12 +43,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
struct otx2_lmt_info *lmt_info;
int err, cpu;
- if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
- pfvf->hw_ops = &otx2_hw_ops;
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
return 0;
- }
- pfvf->hw_ops = &cn10k_hw_ops;
/* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index e3f0bce9908f..945ab10bd4ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
u32 burst, u64 rate, bool pps);
int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+void otx2_init_hw_ops(struct otx2_nic *pfvf);
#endif /* CN10K_H */
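
With the cn10k.c change above, hw_ops selection is factored out of cn10k_lmtst_init() into otx2_init_hw_ops(), and the new cn20k.c (later in this patch) can override the result with cn20k_hw_ops via cn20k_init(). One plausible probe-time wiring is sketched below; otx2_setup_hw_ops() and the is_cn20k_silicon flag are illustrative assumptions, not code from this patch.

#include "otx2_common.h"
#include "cn10k.h"
#include "cn20k.h"

/* Hypothetical probe-time helper: pick hw_ops once, before any mailbox
 * IRQ is requested, so the per-silicon interrupt handlers are in place.
 */
static void otx2_setup_hw_ops(struct otx2_nic *pfvf, bool is_cn20k_silicon)
{
	/* OTX2 vs CN10K ops, decided by the CN10K_LMTST capability flag */
	otx2_init_hw_ops(pfvf);

	/* CN20K replaces the AF/VF/PF-VF mailbox interrupt handlers */
	if (is_cn20k_silicon)
		cn20k_init(pfvf);
}
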
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index a6500e3673f2..c691f0722154 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -481,7 +481,7 @@ static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
goto set_available;
/* Trigger CTX flush to write dirty data back to DRAM */
- reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7);
otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
set_available:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index 9965df0faa3e..43fbce0d6039 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -220,7 +220,7 @@ struct cpt_sg_s {
#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
/* CPT LF CTX Flush Register */
-#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0)
#ifdef CONFIG_XFRM_OFFLOAD
int cn10k_ipsec_init(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
new file mode 100644
index 000000000000..ec8cde98076d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "cn10k.h"
+
+static struct dev_hw_ops cn20k_hw_ops = {
+ .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
+ .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
+ .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
+};
+
+void cn20k_init(struct otx2_nic *pfvf)
+{
+ pfvf->hw_ops = &cn20k_hw_ops;
+}
+EXPORT_SYMBOL(cn20k_init);
+/* CN20K mbox AF => PFx irq handler */
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = pf_irq;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 pf_trig_val;
+
+ pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, pf_trig_val);
+
+ if (pf_trig_val & BIT_ULL(0)) {
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (pf_trig_val & BIT_ULL(1)) {
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(1));
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 vf_trig_val;
+
+ vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL;
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, vf_trig_val);
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ if (vf_trig_val & BIT_ULL(1)) {
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF",
+ BIT_ULL(1));
+ }
+
+ if (vf_trig_val & BIT_ULL(0)) {
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF",
+ BIT_ULL(0));
+ }
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector, intr_vec, vec = 0;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+ }
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ vector = pci_irq_vector(pf->pdev, intr_vec);
+ free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
+ }
+}
+
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct pf_irq_data *irq_data = pf_irq;
+ struct otx2_nic *pf = irq_data->pf;
+ struct mbox *mbox;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = otx2_read64(pf, irq_data->intr_status);
+ otx2_write64(pf, irq_data->intr_status, intr);
+ mbox = pf->mbox_pfvf;
+
+ if (intr)
+ trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);
+
+ irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
+ irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct pf_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+ char *irq_name;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(pf->dev, 4,
+ sizeof(struct pf_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].pf = pf;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev,
+ pf->pcifunc), vec / 2, vec % 2);
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
+ vec / 2, vec % 2);
+
+ hw->pfvf_irq_devid[vec] = &irq_data[vec];
+ ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
+ pf->hw_ops->pfvf_mbox_intr_handler, 0,
+ irq_name,
+ &irq_data[vec]);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ }
+
+ cn20k_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
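
Because both the legacy and CN20K AF-to-PF handlers are now reached through dev_hw_ops.pfaf_mbox_intr_handler, a single registration site can serve every silicon generation. A rough sketch of such a call site follows; otx2_register_pfaf_irq(), the vector argument, and the IRQ name are assumptions for illustration, not code from this patch.

#include "otx2_common.h"

/* Hypothetical caller: request the AF=>PF mailbox IRQ through hw_ops so the
 * same path works for OTX2/CN10K (otx2_pfaf_mbox_intr_handler) and CN20K
 * (cn20k_pfaf_mbox_intr_handler).
 */
static int otx2_register_pfaf_irq(struct otx2_nic *pf, int vector)
{
	return request_irq(pci_irq_vector(pf->pdev, vector),
			   pf->hw_ops->pfaf_mbox_intr_handler, 0,
			   "RVUPF AFPF Mbox", pf);
}
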
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
new file mode 100644
index 000000000000..832adaf8c57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_H
+#define CN20K_H
+
+#include "otx2_common.h"
+
+void cn20k_init(struct otx2_nic *pfvf);
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+#endif /* CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 6b5c9536d26d..f674729124e6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -28,12 +28,12 @@ static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -41,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -318,21 +318,20 @@ fail:
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
- struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
- rss_ctx = rss->rss_ctx[ctx_id];
+ ind_tbl = ind_tbl ?: rss->ind_tbl;
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
/* Ignore the queue if AF_XDP zero copy is enabled */
- if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ if (test_bit(ind_tbl[idx], pfvf->af_xdp_zc_qidx))
continue;
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -352,7 +351,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
}
}
- aq->rss.rq = rss_ctx->ind_tbl[idx];
+ aq->rss.rq = ind_tbl[idx];
/* Fill AQ info */
aq->qidx = index + idx;
@@ -390,30 +389,22 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ rss->rss_size = sizeof(*rss->ind_tbl);
/* Init RSS key if it is not setup already */
if (!rss->enable)
netdev_rss_key_fill(rss->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
- if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Set RSS group 0 as default indirection table */
- rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
- GFP_KERNEL);
- if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
- return -ENOMEM;
-
- rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
+ if (!netif_is_rxfh_configured(pfvf->netdev))
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] =
+ rss->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
- }
- ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
if (ret)
return ret;
@@ -860,9 +851,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
struct otx2_snd_queue *sq;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
sq = &pfvf->qset.sq[qidx];
if (!sq->sqb_ptrs)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ca0e6ab12ceb..e3765b73c434 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -28,10 +28,12 @@
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
+#include <rvu.h>
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"
+#include "cn20k.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -61,6 +63,12 @@
/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG 3
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq);
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -85,10 +93,6 @@ struct otx2_lmt_info {
u64 lmt_addr;
u16 lmt_id;
};
-/* RSS configuration */
-struct otx2_rss_ctx {
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
-};
struct otx2_rss_info {
u8 enable;
@@ -96,7 +100,7 @@ struct otx2_rss_info {
u16 rss_size;
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
- struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
+ u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};
/* NIX (or NPC) RX errors */
@@ -245,6 +249,7 @@ struct otx2_hw {
u16 nix_msixoff; /* Offset of NIX vectors */
char *irq_name;
cpumask_var_t *affinity_mask;
+ struct pf_irq_data *pfvf_irq_devid[4];
/* Stats */
struct otx2_dev_stats dev_stats;
@@ -366,6 +371,9 @@ struct dev_hw_ops {
int size, int qidx);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
+ irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
};
#define CN10K_MCS_SA_PER_SC 4
@@ -433,6 +441,16 @@ struct cn10k_mcs_cfg {
struct list_head rxsc_list;
};
+struct pf_irq_data {
+ u64 intr_status;
+ void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw,
+ int first, int mdevs, u64 intr);
+ struct otx2_nic *pf;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -476,6 +494,7 @@ struct otx2_nic {
struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
struct workqueue_struct *mbox_pfvf_wq;
+ struct qmem *pfvf_mbox_addr;
u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
@@ -730,8 +749,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}
-static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
+ u64 __iomem *ptr = addr;
u64 result;
__asm__ volatile(".cpu generic+lse\n"
@@ -744,7 +764,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
-#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
+{
+ return 0;
+}
#endif
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
@@ -794,7 +818,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
- u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
return otx2_atomic64_add(incr, ptr);
@@ -899,21 +923,11 @@ MBOX_UP_MCS_MESSAGES
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT (100 * HZ)
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
-
static inline bool is_otx2_vf(u16 pcifunc)
{
return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}
-static inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
struct page *page,
size_t offset, size_t size,
@@ -1049,7 +1063,7 @@ int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -1191,4 +1205,6 @@ dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr);
#endif /* OTX2_COMMON_H */
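
otx2_atomic64_add() now takes the register address as void __iomem *; on arm64 it issues an LSE atomic add and hands back the 64-bit value read from the location, which for the NIX/NPA "OP" registers carries the requested stats or status. For contrast, here is a hedged sketch of plain fetch-and-add semantics on ordinary memory, using a GCC/Clang builtin (illustration only; device registers are accessed through the __iomem pointer, not like this):

#include <stdint.h>
#include <stdio.h>

/* Fetch-and-add on normal memory: atomically add 'incr' and return the
 * value that was there before the add.
 */
static uint64_t atomic64_fetch_add_sketch(uint64_t incr, uint64_t *ptr)
{
	return __atomic_fetch_add(ptr, incr, __ATOMIC_RELAXED);
}

int main(void)
{
	uint64_t v = 100;
	uint64_t old = atomic64_fetch_add_sketch(5, &v);

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old, (unsigned long long)v); /* 100, 105 */
	return 0;
}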
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 45b8c9230184..998c734ff839 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -15,6 +15,7 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+#include <cgx_fw_if.h>
#define DRV_NAME "rvu-nicpf"
#define DRV_VF_NAME "rvu-nicvf"
@@ -559,10 +560,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
return 0;
}
-static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_get_rss_hash_opts(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
if (!(rss->flowkey_cfg &
(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
@@ -609,12 +613,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
return 0;
}
-static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_set_rss_hash_opts(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
- u32 rss_cfg = rss->flowkey_cfg;
+ struct otx2_rss_info *rss;
+ u32 rss_cfg;
+
+ rss = &pfvf->hw.rss_info;
+ rss_cfg = rss->flowkey_cfg;
if (!rss->enable) {
netdev_err(pfvf->netdev,
@@ -743,8 +752,6 @@ static int otx2_get_rxnfc(struct net_device *dev,
if (netif_running(dev) && ntuple)
ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
- case ETHTOOL_GRXFH:
- return otx2_get_rss_hash_opts(pfvf, nfc);
default:
break;
}
@@ -759,9 +766,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
- case ETHTOOL_SRXFH:
- ret = otx2_set_rss_hash_opts(pfvf, nfc);
- break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
ret = otx2_add_flow(pfvf, nfc);
@@ -792,60 +796,91 @@ static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
return MAX_RSS_INDIR_TBL_SIZE;
}
-static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
+static int otx2_create_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ unsigned int queues;
+ u32 *ind_tbl;
+ int idx;
+
+ rss = &pfvf->hw.rss_info;
+ queues = pfvf->hw.rx_queues;
- otx2_rss_ctx_flow_del(pfvf, ctx_id);
- kfree(rss->rss_ctx[ctx_id]);
- rss->rss_ctx[ctx_id] = NULL;
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ ind_tbl = rxfh->indir;
+ if (!ind_tbl) {
+ ind_tbl = ethtool_rxfh_context_indir(ctx);
+ for (idx = 0; idx < rss->rss_size; idx++)
+ ind_tbl[idx] = ethtool_rxfh_indir_default(idx, queues);
+ }
+
+ otx2_set_rss_table(pfvf, rxfh->rss_context, ind_tbl);
return 0;
}
-static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
- u32 *rss_context)
+static int otx2_modify_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- u8 ctx;
+ struct otx2_nic *pfvf = netdev_priv(dev);
- for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
- if (!rss->rss_ctx[ctx])
- break;
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!pfvf->hw.rss_info.enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
}
- if (ctx == MAX_RSS_GROUPS)
- return -EINVAL;
- rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
- if (!rss->rss_ctx[ctx])
- return -ENOMEM;
- *rss_context = ctx;
+ if (rxfh->indir)
+ otx2_set_rss_table(pfvf, rxfh->rss_context, rxfh->indir);
return 0;
}
+static int otx2_remove_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ if (!pfvf->hw.rss_info.enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ otx2_rss_ctx_flow_del(pfvf, rss_context);
+ return 0;
+}
+
/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int ret, idx;
+ int idx;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- rss_context = rxfh->rss_context;
-
- if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
- rss_context >= MAX_RSS_GROUPS)
- return -EINVAL;
-
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
@@ -857,21 +892,12 @@ static int otx2_set_rxfh(struct net_device *dev,
memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
- if (rxfh->rss_delete)
- return otx2_rss_ctx_delete(pfvf, rss_context);
-
- if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = otx2_rss_ctx_create(pfvf, &rss_context);
- rxfh->rss_context = rss_context;
- if (ret)
- return ret;
- }
+
if (rxfh->indir) {
- rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
+ rss->ind_tbl[idx] = rxfh->indir[idx];
}
- otx2_set_rss_table(pfvf, rss_context);
+ otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
return 0;
}
@@ -880,9 +906,7 @@ static int otx2_set_rxfh(struct net_device *dev,
static int otx2_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
u32 *indir = rxfh->indir;
int idx, rx_queues;
@@ -890,32 +914,21 @@ static int otx2_get_rxfh(struct net_device *dev,
rss = &pfvf->hw.rss_info;
rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->rss_context)
- rss_context = rxfh->rss_context;
-
if (!indir)
return 0;
- if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ if (!rss->enable) {
rx_queues = pfvf->hw.rx_queues;
for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
return 0;
}
- if (rss_context >= MAX_RSS_GROUPS)
- return -ENOENT;
-
- rss_ctx = rss->rss_ctx[rss_context];
- if (!rss_ctx)
- return -ENOENT;
-
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++) {
- /* Ignore if the rx queue is AF_XDP zero copy enabled */
- if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
- continue;
- indir[idx] = rss_ctx->ind_tbl[idx];
- }
+
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore if the rx queue is AF_XDP zero copy enabled */
+ if (test_bit(rss->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+ indir[idx] = rss->ind_tbl[idx];
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
@@ -1123,17 +1136,9 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap,
*link_ksettings)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
- const int otx2_sgmii_features[6] = {
- ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- };
/* CGX link modes to Ethtool link mode mapping */
- const int cgx_link_mode[27] = {
- 0, /* SGMII Mode */
+ const int cgx_link_mode[CGX_MODE_MAX] = {
+ 0, /* SGMII 1000baseT */
ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
@@ -1163,14 +1168,19 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap,
};
u8 bit;
- for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
- /* SGMII mode is set */
- if (bit == 0)
- linkmode_set_bit_array(otx2_sgmii_features,
- ARRAY_SIZE(otx2_sgmii_features),
- otx2_link_modes);
- else
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, ARRAY_SIZE(cgx_link_mode)) {
+ if (bit == CGX_MODE_SGMII_10M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, otx2_link_modes);
+ } else if (bit == CGX_MODE_SGMII_100M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, otx2_link_modes);
+ } else if (bit == CGX_MODE_SGMII) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, otx2_link_modes);
+ } else {
linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
}
if (req_mode == OTX2_MODE_ADVERTISED)
@@ -1211,23 +1221,10 @@ static int otx2_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
- u64 *mode)
-{
- u32 bit_pos;
-
- /* Firmware does not support requesting multiple advertised modes
- * return first set bit
- */
- bit_pos = find_first_bit(cmd->link_modes.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
- *mode = bit_pos;
-}
-
static int otx2_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct otx2_nic *pf = netdev_priv(netdev);
struct ethtool_link_ksettings cur_ks;
struct cgx_set_link_mode_req *req;
@@ -1264,7 +1261,20 @@ static int otx2_set_link_ksettings(struct net_device *netdev,
*/
req->args.duplex = cmd->base.duplex ^ 0x1;
req->args.an = cmd->base.autoneg;
- otx2_get_advertised_mode(cmd, &req->args.mode);
+ /* Mask unsupported modes and send message to AF */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mask);
+
+ linkmode_copy(req->args.advertising,
+ cmd->link_modes.advertising);
+ linkmode_andnot(req->args.advertising,
+ req->args.advertising, mask);
+
+ /* Inform AF that it needs to parse this differently */
+ if (bitmap_weight(req->args.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) >= 2)
+ req->args.multimode = true;
err = otx2_sync_mbox_msg(&pf->mbox);
end:
@@ -1306,12 +1316,12 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
+ .rxfh_max_num_contexts = MAX_RSS_GROUPS,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1329,6 +1339,11 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
+ .create_rxfh_context = otx2_create_rxfh,
+ .modify_rxfh_context = otx2_modify_rxfh,
+ .remove_rxfh_context = otx2_remove_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1423,12 +1438,12 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
+ .rxfh_max_num_contexts = MAX_RSS_GROUPS,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
@@ -1442,6 +1457,11 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
+ .create_rxfh_context = otx2_create_rxfh,
+ .modify_rxfh_context = otx2_modify_rxfh,
+ .remove_rxfh_context = otx2_remove_rxfh,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
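
When create_rxfh_context is invoked without an explicit table, the code above falls back to ethtool_rxfh_indir_default(), which spreads indirection-table entries across the RX queues round-robin. A small self-contained sketch of that default layout (example sizes, not driver constants):

#include <stdio.h>

/* Round-robin default indirection table, mirroring what the kernel's
 * ethtool_rxfh_indir_default() computes (index % number of RX rings).
 */
static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int indir[16];

	for (unsigned int i = 0; i < 16; i++)
		indir[i] = indir_default(i, 4);	/* 0 1 2 3 0 1 2 3 ... */

	for (unsigned int i = 0; i < 16; i++)
		printf("%u ", indir[i]);
	printf("\n");
	return 0;
}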
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index db7c466fdc39..b23585c5e5c2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -206,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register ME interrupt handler*/
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
otx2_pf_me_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -216,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register FLR interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -228,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
if (numvfs > 64) {
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFME1),
otx2_pf_me_intr_handler, 0, irq_name, pf);
@@ -238,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
}
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFFLR1),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
@@ -294,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr)
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -545,7 +547,7 @@ end:
}
}
-static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
int vfs = pf->total_vfs;
@@ -574,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
return IRQ_HANDLED;
}
+static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs)
+{
+ struct qmem *mbox_addr;
+ int err;
+
+ err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE);
+ if (err) {
+ dev_err(pf->dev, "qmem alloc fail\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ pf->pfvf_mbox_addr = mbox_addr;
+
+ return mbox_addr->base;
+}
+
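The qmem region allocated in cn20k_pfvf_mbox_alloc() holds one MBOX_SIZE slot per VF, and the PF advertises its IOVA through RVU_PF_VF_MBOX_ADDR. A rough sketch of the per-VF offset arithmetic (hypothetical constants, plain C, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MBOX_SIZE_SKETCH 0x10000	/* example slot size, not the driver constant */

/* Each VF gets one fixed-size slot in the contiguous region the PF allocated. */
static uintptr_t vf_mbox_base(uintptr_t region_base, int vf)
{
	return region_base + (uintptr_t)vf * MBOX_SIZE_SKETCH;
}

int main(void)
{
	uintptr_t base = 0x40000000UL;	/* made-up base address */

	printf("VF0 mbox @ %#lx, VF3 mbox @ %#lx\n",
	       (unsigned long)vf_mbox_base(base, 0),
	       (unsigned long)vf_mbox_base(base, 3));
	return 0;
}
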
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
void __iomem *hwbase;
@@ -595,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- /* On CN10K platform, PF <-> VF mailbox region follows after
- * PF <-> AF mailbox region.
+ /* For CN20K, the PF allocates mbox memory in DRAM and writes the PF/VF
+ * regions/offsets to RVU_PF_VF_MBOX_ADDR; RVU_PFX_FUNC_PFAF_MBOX then
+ * gives the aliased address used to access the PF/VF mailbox regions.
*/
- if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- MBOX_SIZE;
- else
- base = readq((void __iomem *)((u64)pf->reg_base +
- RVU_PF_VF_BAR4_ADDR));
+ if (is_cn20k(pf->pdev)) {
+ hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs);
+ } else {
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR);
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
- if (!hwbase) {
- err = -ENOMEM;
- goto free_wq;
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
}
mbox = &pf->mbox_pfvf[0];
@@ -632,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
return 0;
free_iomem:
- if (hwbase)
+ if (hwbase && !(is_cn20k(pf->pdev)))
iounmap(hwbase);
free_wq:
destroy_workqueue(pf->mbox_pfvf_wq);
@@ -651,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_pfvf_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap(mbox->mbox.hwbase);
+ else
+ qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr);
otx2_mbox_destroy(&mbox->mbox);
}
@@ -676,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
int vector;
+ if (is_cn20k(pf->pdev))
+ return cn20k_disable_pfvf_mbox_intr(pf, numvfs);
+
/* Disable PF <=> VF mailbox IRQ */
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
@@ -697,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
char *irq_name;
int err;
+ if (is_cn20k(pf->pdev))
+ return cn20k_register_pfvf_mbox_intr(pf, numvfs);
+
/* Register MBOX0 interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
@@ -717,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox1",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
err = request_irq(pci_irq_vector(pf->pdev,
@@ -1006,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_mbox_msg_send(mbox, 0);
}
-static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
struct mbox *mw = &pf->mbox;
@@ -1064,10 +1099,18 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
- int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ int vector;
/* Disable AF => PF mailbox IRQ */
- otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ } else {
+ vector = pci_irq_vector(pf->pdev,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C,
+ BIT_ULL(0) | BIT_ULL(1));
+ }
free_irq(vector, pf);
}
EXPORT_SYMBOL(otx2_disable_mbox_intr);
@@ -1080,10 +1123,24 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
int err;
/* Register mailbox interrupt handler */
- irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
- err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
- otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (!is_cn20k(pf->pdev)) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ } else {
+ irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX *
+ NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ }
if (err) {
dev_err(pf->dev,
"RVUPF: IRQ registration failed for PFAF mbox irq\n");
@@ -1093,8 +1150,14 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
/* Enable mailbox interrupt for msgs coming from AF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1));
+ }
if (!probe_af)
return 0;
@@ -1125,7 +1188,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -1145,12 +1208,20 @@ int otx2_pfaf_mbox_init(struct otx2_nic *pf)
if (!pf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e AF) and this PF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
+ /* For CN20K, the AF allocates mbox memory in DRAM and writes the PF
+ * regions/offsets to RVU_MBOX_AF_PFX_ADDR; RVU_PFX_FUNC_PFAF_MBOX then
+ * gives the aliased address used to access the AF/PF mailbox regions.
*/
- hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- MBOX_SIZE);
+ if (is_cn20k(pf->pdev))
+ hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX +
+ ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT);
+ else
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e AF) and this PF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start
+ (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1323,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
struct otx2_snd_queue *sq;
- u64 val, *ptr;
- u64 qidx = 0;
+ void __iomem *ptr;
+ u64 val, qidx = 0;
/* CQ */
for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
@@ -1972,7 +2043,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for QERR\n",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
goto err_disable_napi;
}
@@ -1990,7 +2061,7 @@ int otx2_open(struct net_device *netdev)
if (name_len >= NAME_SIZE) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
err = -EINVAL;
goto err_free_cints;
}
@@ -2001,7 +2072,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
goto err_free_cints;
}
vec++;
@@ -2087,7 +2158,6 @@ int otx2_stop(struct net_device *netdev)
struct otx2_nic *pf = netdev_priv(netdev);
struct otx2_cq_poll *cq_poll = NULL;
struct otx2_qset *qset = &pf->qset;
- struct otx2_rss_info *rss;
int qidx, vec, wrk;
/* If the DOWN flag is set resources are already freed */
@@ -2105,10 +2175,7 @@ int otx2_stop(struct net_device *netdev)
otx2_rxtx_enable(pf, false);
/* Clear RSS enable flag */
- rss = &pf->hw.rss_info;
- rss->enable = false;
- if (!netif_is_rxfh_configured(netdev))
- kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ pf->hw.rss_info.enable = false;
/* Cleanup Queue IRQ */
vec = pci_irq_vector(pf->pdev,
@@ -2998,8 +3065,13 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
if (err)
return err;
- err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (!is_cn20k(pf->pdev))
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ else
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT,
+ RVU_MBOX_PF_INT_VEC_CNT,
+ PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
__func__, num_vec);
@@ -3008,6 +3080,11 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
otx2_setup_dev_hw_settings(pf);
+ if (is_cn20k(pf->pdev))
+ cn20k_init(pf);
+ else
+ otx2_init_hw_ops(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
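
otx2_init_rsrc() and otx2vf_probe() now pick between cn20k_init() and otx2_init_hw_ops(), after which the mailbox IRQ paths go through the dev_hw_ops function pointers rather than hard-coded handlers. A compact sketch of that per-generation ops-table dispatch (invented names, illustration only):

#include <stdio.h>

/* A per-generation ops table is selected once at probe time and callers
 * go through the function pointers afterwards.
 */
struct hw_ops_sketch {
	void (*setup_mbox_irq)(void);
};

static void legacy_setup(void) { printf("otx2/cn10k mbox IRQ setup\n"); }
static void cn20k_setup(void)  { printf("cn20k mbox IRQ setup\n"); }

static const struct hw_ops_sketch legacy_ops = { .setup_mbox_irq = legacy_setup };
static const struct hw_ops_sketch cn20k_ops  = { .setup_mbox_irq = cn20k_setup };

int main(void)
{
	int is_cn20k = 1;	/* would come from the PCI device ID */
	const struct hw_ops_sketch *ops = is_cn20k ? &cn20k_ops : &legacy_ops;

	ops->setup_mbox_irq();
	return 0;
}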
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 63130ba37e9d..e52cc6b1a26c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -193,7 +193,7 @@ static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static u64 ptp_cc_read(struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index e3aee6e36215..1cd576fd09c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -44,6 +44,17 @@
#define RVU_PF_VF_MBOX_ADDR (0xC40)
#define RVU_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
#define RVU_VF_VFPF_MBOX1 (0x00008)
@@ -58,6 +69,11 @@
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
#define RVU_VF_MBOX_REGION (0xC0000)
+/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_PFX_FUNC_PFAF_MBOX (0x80000)
+#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000)
+
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -138,39 +154,12 @@
#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
-/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
-
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+/* CN20K registers */
+#define RVU_PF_DISC (0x0)
+
#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 9a226ca74425..5f80b23c5335 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -467,7 +467,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
target = act->dev;
if (target->dev.parent) {
priv = netdev_priv(target);
- if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ if (rvu_get_pf(nic->pdev, nic->pcifunc) !=
+ rvu_get_pf(nic->pdev, priv->pcifunc)) {
NL_SET_ERR_MSG_MOD(extack,
"can't redirect to other pf/vf");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 99ace381cc78..625bb5a05344 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1571,7 +1571,7 @@ handle_xdp_verdict:
cq->pool_ptrs++;
if (xsk_buff) {
xsk_buff_free(xsk_buff);
- } else if (page->pp) {
+ } else if (pp_page_to_nmdesc(page)->pp) {
page_pool_recycle_direct(pool->page_pool, page);
} else {
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 8a8b598bd389..5589fccd370b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -240,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
/* Disable VF => PF mailbox IRQ */
otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+
+ if (is_cn20k(vf->pdev))
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1));
+
free_irq(vector, vf);
}
@@ -252,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Register mailbox interrupt handler */
irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
- err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
- otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1));
+
+ if (!is_cn20k(vf->pdev)) {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ } else {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name,
+ vf);
+ }
+
if (err) {
dev_err(vf->dev,
"RVUPF: IRQ registration failed for VFAF mbox irq\n");
@@ -264,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Enable mailbox interrupt for msgs coming from PF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
- otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(vf->pdev)) {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) |
+ BIT_ULL(2) | BIT_ULL(3));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3));
+ }
if (!probe_pf)
return 0;
@@ -315,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* For the cn20k platform, the VF mailbox region is in DRAM, aliased via the
+ * AF VF MBOX ADDR; MBOX is a separate RVU block.
+ */
+ if (is_cn20k(vf->pdev)) {
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX <<
+ RVU_FUNC_BLKADDR_SHIFT);
+ } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
/* For cn10k platform, VF mailbox region is in its BAR2
* register space
*/
@@ -616,6 +642,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
otx2_setup_dev_hw_settings(vf);
+
+ if (is_cn20k(vf->pdev))
+ cn20k_init(vf);
+ else
+ otx2_init_hw_ops(vf);
+
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
index b328aae23d73..7d67b4cbaf71 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -132,7 +132,7 @@ int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qi
set_bit(qidx, pf->af_xdp_zc_qidx);
otx2_clean_up_rq(pf, qidx);
/* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
- otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
/* Kick start the NAPI context so that receiving will start */
return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
}
@@ -153,7 +153,7 @@ int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
clear_bit(qidx, pf->af_xdp_zc_qidx);
xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
/* Reconfigure RSS table as 'qidx' now needs to be part of RSS */
- otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
return 0;
}
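
Both AF_XDP paths above reprogram the RSS table so a zero-copy queue drops out of (or rejoins) the spread; otx2_set_rss_table() skips indirection entries whose queue is marked in af_xdp_zc_qidx. A self-contained sketch of that filtering (hypothetical names, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Skip indirection-table entries that point at zero-copy queues, in the
 * spirit of the test_bit() check in otx2_set_rss_table(); 'zc' stands in
 * for the driver's af_xdp_zc_qidx bitmap.
 */
static void program_rss_sketch(const unsigned int *ind_tbl, int n, const bool *zc)
{
	for (int i = 0; i < n; i++) {
		if (zc[ind_tbl[i]])
			continue;	/* leave this HW entry untouched */
		printf("entry %d -> RQ %u\n", i, ind_tbl[i]);
	}
}

int main(void)
{
	unsigned int ind_tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	bool zc[4] = { false, false, true, false };	/* queue 2 is AF_XDP ZC */

	program_rss_sketch(ind_tbl, 8, zc);
	return 0;
}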
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 58d572ce08ef..2872adabc830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
int sqe_tail, sqe_head;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
incr = (u64)qidx << 32;
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 2cd3da3b6843..25af98034e2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -244,10 +244,10 @@ static int rvu_rep_devlink_port_register(struct rep_dev *rep)
if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = rvu_get_pf(rep->pcifunc);
+ attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc);
+ attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc);
attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
}
@@ -672,7 +672,8 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
rep->pcifunc = pcifunc;
snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
- rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK));
+ rvu_get_pf(priv->pdev, pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK));
ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index e4cfdc8bc055..68f8a1e36aa6 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1229,9 +1229,9 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
int work_done = 0;
/*
- * We call txq_reclaim every time since in NAPI interupts are disabled
- * and due to this we miss the TX_DONE interrupt,which is not updated in
- * interrupt status register.
+ * We call txq_reclaim every time since interrupts are disabled in NAPI
+ * and due to this we miss the TX_DONE interrupt, which is not updated
+ * in the interrupt status register.
*/
txq_reclaim(dev, 0);
if (netif_queue_stopped(dev)
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 7bfd3f230ff5..2ba361f8ce7d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
select PINCTRL
select PHYLINK
select DIMLIB
+ select GENERIC_ALLOCATOR
select PAGE_POOL
select PAGE_POOL_STATS
select PCS_MTK_LYNXI
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b38e4f2de674..5a5fcde76dc0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -27,6 +27,7 @@
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
+#include <linux/genalloc.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -1267,6 +1268,34 @@ static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
return (void *)data;
}
+static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
+ dma_addr_t *dma_handle, bool use_sram)
+{
+ void *dma_ring;
+
+ if (use_sram && eth->sram_pool) {
+ dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
+ if (!dma_ring)
+ return dma_ring;
+ *dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
+ (unsigned long)dma_ring);
+ } else {
+ dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
+ GFP_KERNEL);
+ }
+
+ return dma_ring;
+}
+
+static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
+ dma_addr_t dma_handle, bool in_sram)
+{
+ if (in_sram && eth->sram_pool)
+ gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
+ else
+ dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
+}
+
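mtk_dma_ring_alloc()/mtk_dma_ring_free() prefer carving descriptor rings out of the SRAM gen_pool and fall back to dma_alloc_coherent() when no SRAM pool is in use. A toy, self-contained analogue of that "fixed on-chip region first, coherent DMA second" shape, with a static buffer standing in for SRAM and the heap standing in for coherent DMA (illustration only; the driver uses the kernel genalloc and DMA APIs):

#include <stdio.h>
#include <stdlib.h>

static unsigned char sram[4096];	/* toy stand-in for the SRAM pool */
static size_t sram_used;

static void *ring_alloc(size_t size, int use_sram, int *in_sram)
{
	*in_sram = use_sram;
	if (use_sram) {
		if (sram_used + size > sizeof(sram))
			return NULL;	/* pool exhausted, like gen_pool_alloc() failing */
		sram_used += size;
		return &sram[sram_used - size];
	}
	return calloc(1, size);	/* heap stands in for coherent DMA memory */
}

int main(void)
{
	int a, b;
	void *ring0 = ring_alloc(1024, 1, &a);	/* SRAM-backed ring */
	void *ring1 = ring_alloc(1024, 0, &b);	/* coherent-DMA-style ring */

	printf("ring0 %p (in sram: %d), ring1 %p (in sram: %d)\n", ring0, a, ring1, b);
	free(ring1);
	return 0;
}
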
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1276,13 +1305,8 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i, j, len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
- eth->scratch_ring = eth->sram_base;
- else
- eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->tx.desc_size,
- &eth->phy_scratch_ring,
- GFP_KERNEL);
+ eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
+ &eth->phy_scratch_ring, true);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -2620,14 +2644,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
- ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
- } else {
- ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
- &ring->phys, GFP_KERNEL);
- }
-
+ ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
if (!ring->dma)
goto no_tx_mem;
@@ -2726,10 +2743,10 @@ static void mtk_tx_clean(struct mtk_eth *eth)
kfree(ring->buf);
ring->buf = NULL;
}
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
- dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->tx.desc_size,
- ring->dma, ring->phys);
+
+ if (ring->dma) {
+ mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
+ ring->dma, ring->phys, true);
ring->dma = NULL;
}
@@ -2746,14 +2763,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
- int rx_data_len, rx_dma_size, tx_ring_size;
+ int rx_data_len, rx_dma_size;
int i;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- tx_ring_size = MTK_QDMA_RING_SIZE;
- else
- tx_ring_size = soc->tx.dma_size;
-
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
@@ -2788,20 +2800,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
ring->page_pool = pp;
}
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
- rx_flag != MTK_RX_FLAGS_NORMAL) {
- ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->rx.desc_size,
- &ring->phys, GFP_KERNEL);
- } else {
- struct mtk_tx_ring *tx_ring = &eth->tx_ring;
-
- ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->tx.desc_size * (ring_no + 1);
- ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->tx.desc_size * (ring_no + 1);
- }
-
+ ring->dma = mtk_dma_ring_alloc(eth,
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys,
+ rx_flag == MTK_RX_FLAGS_NORMAL);
if (!ring->dma)
return -ENOMEM;
@@ -2916,10 +2918,9 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
ring->data = NULL;
}
- if (!in_sram && ring->dma) {
- dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->rx.desc_size,
- ring->dma, ring->phys);
+ if (ring->dma) {
+ mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
+ ring->dma, ring->phys, in_sram);
ring->dma = NULL;
}
@@ -3287,15 +3288,16 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_tx_reset_subqueue(eth->netdev[i], j);
}
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
- dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->tx.desc_size,
- eth->scratch_ring, eth->phy_scratch_ring);
+ if (eth->scratch_ring) {
+ mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
+ eth->scratch_ring, eth->phy_scratch_ring,
+ true);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
+
mtk_tx_clean(eth);
- mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+ mtk_rx_clean(eth, &eth->rx_ring[0], true);
mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
if (eth->hwlro) {
@@ -3336,6 +3338,53 @@ static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&eth->pending_work);
}
+static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
+{
+ int i;
+
+ /* future SoCs beginning with MT7988 should use named IRQs in dts */
+ eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
+ eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
+ if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
+ return 0;
+
+ /* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
+ if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
+ return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
+ "Error requesting FE TX IRQ\n");
+
+ if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
+ return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
+ "Error requesting FE RX IRQ\n");
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
+ dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");
+
+ /* legacy way:
+ * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
+ * from devicetree and used for both RX and TX - it is shared.
+ * On SoCs with non-shared IRQs the first entry is not used,
+ * the second is for TX, and the third is for RX.
+ */
+ for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+ if (i == MTK_FE_IRQ_SHARED)
+ eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
+ else
+ eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
+ } else {
+ eth->irq[i] = platform_get_irq(pdev, i + 1);
+ }
+
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
@@ -3389,7 +3438,7 @@ static void mtk_poll_controller(struct net_device *dev)
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
- mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
@@ -4875,7 +4924,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
eth->netdev[id]->dev.of_node = np;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
@@ -4918,7 +4967,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
list_add_tail(&dev->close_list, &dev_list);
}
- dev_close_many(&dev_list, false);
+ netif_close_many(&dev_list, false);
eth->dma_dev = dma_dev;
@@ -4960,9 +5009,30 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
return 0;
}
+static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
+{
+ dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
+
+ if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
+ res->end)
+ return -EINVAL;
+
+ eth->sram_pool = devm_gen_pool_create(eth->dev,
+ const_ilog2(MTK_ETH_SRAM_GRANULARITY),
+ NUMA_NO_NODE, dev_name(eth->dev));
+
+ if (IS_ERR(eth->sram_pool))
+ return PTR_ERR(eth->sram_pool);
+
+ return gen_pool_add_virt(eth->sram_pool,
+ (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
+ res->start + MTK_ETH_SRAM_OFFSET,
+ MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
+}
+
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = NULL, *res_sram;
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -4982,20 +5052,6 @@ static int mtk_probe(struct platform_device *pdev)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
- /* SRAM is actual memory and supports transparent access just like DRAM.
- * Hence we don't require __iomem being set and don't need to use accessor
- * functions to read from or write to SRAM.
- */
- if (mtk_is_netsys_v3_or_greater(eth)) {
- eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(eth->sram_base))
- return PTR_ERR(eth->sram_base);
- } else {
- eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
- }
- }
-
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
if (!err)
@@ -5070,16 +5126,21 @@ static int mtk_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_destroy_sgmii;
}
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
- if (mtk_is_netsys_v3_or_greater(eth)) {
- res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_sram) {
+ eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
+ "sram", 0);
+ if (!eth->sram_pool) {
+ if (!mtk_is_netsys_v3_or_greater(eth)) {
+ err = mtk_setup_legacy_sram(eth, res);
+ if (err)
+ goto err_destroy_sgmii;
+ } else {
+ dev_err(&pdev->dev,
+ "Could not get SRAM pool\n");
err = -EINVAL;
goto err_destroy_sgmii;
}
- eth->phy_scratch_ring = res_sram->start;
- } else {
- eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
}
}
}
@@ -5105,17 +5166,10 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
- eth->irq[i] = eth->irq[0];
- else
- eth->irq[i] = platform_get_irq(pdev, i);
- if (eth->irq[i] < 0) {
- dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- err = -ENXIO;
- goto err_wed_exit;
- }
- }
+ err = mtk_get_irqs(pdev, eth);
+ if (err)
+ goto err_wed_exit;
+
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
@@ -5159,17 +5213,17 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
- err = devm_request_irq(eth->dev, eth->irq[0],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
mtk_handle_irq, 0,
dev_name(eth->dev), eth);
} else {
- err = devm_request_irq(eth->dev, eth->irq[1],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
mtk_handle_irq_tx, 0,
dev_name(eth->dev), eth);
if (err)
goto err_free_dev;
- err = devm_request_irq(eth->dev, eth->irq[2],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
}
@@ -5215,7 +5269,7 @@ static int mtk_probe(struct platform_device *pdev)
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[i]->base_addr, eth->irq[0]);
+ eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
@@ -5556,3 +5610,4 @@ module_platform_driver(mtk_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 6f72a8c8ae1e..0168e2fbc619 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -141,8 +141,10 @@
#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
0x54C : 0x50C + (_x * 0x1000); })
-/* Internal SRAM offset */
-#define MTK_ETH_SRAM_OFFSET 0x40000
+/* legacy DT support for internal SRAM */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+#define MTK_ETH_SRAM_GRANULARITY 32
+#define MTK_ETH_NETSYS_V2_SRAM_SIZE 0x40000
/* FE global misc reg*/
#define MTK_FE_GLO_MISC 0x124
@@ -642,6 +644,11 @@
#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+#define MTK_FE_IRQ_SHARED 0
+#define MTK_FE_IRQ_TX 0
+#define MTK_FE_IRQ_RX 1
+#define MTK_FE_IRQ_NUM (MTK_FE_IRQ_RX + 1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -1238,8 +1245,9 @@ struct mtk_soc_data {
 /* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
- * @dev: The device pointer used for dma mapping/alloc
+ * @dma_dev: The device pointer used for dma mapping/alloc
* @base: The mapped register i/o base
+ * @sram_pool: Pointer to SRAM pool used for DMA descriptor rings
* @page_lock: Make sure that register operations are atomic
 * @tx_irq_lock: Make sure that IRQ register operations are atomic
 * @rx_irq_lock: Make sure that IRQ register operations are atomic
@@ -1285,14 +1293,14 @@ struct mtk_eth {
struct device *dev;
struct device *dma_dev;
void __iomem *base;
- void *sram_base;
+ struct gen_pool *sram_pool;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq[3];
+ int irq[MTK_FE_IRQ_NUM];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
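
A minimal sketch, assuming the pool was populated as in mtk_setup_legacy_sram() or via the devicetree "sram" node, of how a caller could carve a descriptor ring out of the new SRAM gen_pool; the helper name is illustrative, only the genalloc calls are existing kernel API:

#include <linux/genalloc.h>

/* Sketch only: allocate a ring from the SRAM pool and recover the
 * physical address that was registered with gen_pool_add_virt().
 * Returns NULL when the pool is exhausted, in which case a caller
 * would typically fall back to dma_alloc_coherent().
 */
static void *mtk_sram_ring_alloc(struct gen_pool *pool, size_t size,
				 dma_addr_t *dma)
{
	unsigned long vaddr = gen_pool_alloc(pool, size);

	if (!vaddr)
		return NULL;

	*dma = gen_pool_virt_to_phys(pool, vaddr);
	return (void *)vaddr;
}
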
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 351dd152f4f3..73c26fcfd85e 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1318,26 +1318,14 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
- struct reserved_mem *rmem;
- struct device_node *np;
- int index;
+ struct resource res;
+ int ret;
- index = of_property_match_string(dev->hw->node, "memory-region-names",
- "wo-dlm");
- if (index < 0)
- return index;
-
- np = of_parse_phandle(dev->hw->node, "memory-region", index);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
-
- if (!rmem)
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource_byname(dev->hw->node, "wo-dlm", &res);
+ if (ret)
+ return ret;
- dev->rro.miod_phys = rmem->base;
+ dev->rro.miod_phys = res.start;
dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index c06e5ad18b01..fa6b21603416 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -234,27 +234,23 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
}
static int
-mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
+mtk_wed_get_memory_region(struct mtk_wed_hw *hw, const char *name,
struct mtk_wed_wo_memory_region *region)
{
- struct reserved_mem *rmem;
- struct device_node *np;
-
- np = of_parse_phandle(hw->node, "memory-region", index);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
+ struct resource res;
+ int ret;
- if (!rmem)
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource_byname(hw->node, name, &res);
+ if (ret)
+ return 0;
- region->phy_addr = rmem->base;
- region->size = rmem->size;
- region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+ region->phy_addr = res.start;
+ region->size = resource_size(&res);
+ region->addr = devm_ioremap_resource(hw->dev, &res);
+ if (IS_ERR(region->addr))
+ return PTR_ERR(region->addr);
- return !region->addr ? -EINVAL : 0;
+ return 0;
}
static int
@@ -319,13 +315,7 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- int index = of_property_match_string(wo->hw->node,
- "memory-region-names",
- mem_region[i].name);
- if (index < 0)
- continue;
-
- ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
+ ret = mtk_wed_get_memory_region(wo->hw, mem_region[i].name, &mem_region[i]);
if (ret)
return ret;
}
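
Both mtk_wed hunks above converge on the same reserved-memory lookup; a compact sketch of that pattern follows (the helper name is an assumption, the two calls are the ones used in the patch):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>

/* Sketch only: resolve a named reserved-memory region in one call and
 * map it, replacing the old of_parse_phandle()/of_reserved_mem_lookup()
 * dance.
 */
static void __iomem *wed_map_named_region(struct device *dev,
					  struct device_node *np,
					  const char *name)
{
	struct resource res;
	int ret;

	ret = of_reserved_mem_region_to_resource_byname(np, name, &res);
	if (ret)
		return ERR_PTR(ret);

	return devm_ioremap_resource(dev, &res);
}
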
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index d73a2044dc26..2aeaafcfb993 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -38,7 +38,7 @@
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
-static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
+static u64 mlx4_en_read_clock(struct cyclecounter *tc)
{
struct mlx4_en_dev *mdev =
container_of(tc, struct mlx4_en_dev, cycles);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 752a72499b4f..be80da03a594 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -290,9 +290,6 @@ static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- if (!my_ets)
- return -EINVAL;
-
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 281b34af0bb4..d2071aff7b8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2670,8 +2670,7 @@ static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
.sync_table = mlx4_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b33285d755b9..92a16ddb7d86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -460,9 +460,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
truesize += frag_info->frag_stride;
if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ struct netmem_desc *desc = pp_page_to_nmdesc(page);
+
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
- atomic_long_read(&page->pp_ref_count) != 1 ||
+ atomic_long_read(&desc->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index febeadfdd5a5..03d2fc7d9b09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -49,6 +49,8 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#include <rdma/ib_verbs.h>
+
#include "mlx4.h"
#include "fw.h"
#include "icm.h"
@@ -1246,14 +1248,6 @@ err_out:
return err ? err : count;
}
-enum ibta_mtu {
- IB_MTU_256 = 1,
- IB_MTU_512 = 2,
- IB_MTU_1024 = 3,
- IB_MTU_2048 = 4,
- IB_MTU_4096 = 5
-};
-
static inline int int_to_ibta_mtu(int mtu)
{
switch (mtu) {
@@ -1266,7 +1260,7 @@ static inline int int_to_ibta_mtu(int mtu)
}
}
-static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
+static inline int ibta_mtu_to_int(enum ib_mtu mtu)
{
switch (mtu) {
case IB_MTU_256: return 256;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d292e6a9e22c..650df18a9216 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
- lib/crypto.o lib/sd.o
+ lib/crypto.o lib/sd.o en/pcie_cong_event.o
#
# Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index b1aeea7c4a91..e395ef5f356e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1947,8 +1947,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
pages_queue, token, force_polling);
- if (callback)
- return err;
+ if (callback && !err)
+ return 0;
if (err > 0) /* Failed in FW, command didn't execute */
err = deliv_status_to_err(err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 73cd74644378..3ffa3fbacd16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -35,6 +35,55 @@ static u16 mlx5_fw_ver_subminor(u32 version)
return version & 0xffff;
}
+static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ char *str, *end;
+ u8 *vpd_data;
+ int err = 0;
+ int start;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data))
+ return 0;
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ end = strchrnul(str, ' ');
+ *end = '\0';
+ err = devlink_info_board_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ err = devlink_info_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(vpd_data);
+ return err;
+}
+
#define DEVLINK_FW_STRING_LEN 32
static int
@@ -49,6 +98,10 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (!mlx5_core_is_pf(dev))
return 0;
+ err = mlx5_devlink_serial_numbers_put(dev, req, extack);
+ if (err)
+ return err;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -323,6 +376,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
+ .rate_leaf_tc_bw_set = mlx5_esw_devlink_rate_leaf_tc_bw_set,
+ .rate_node_tc_bw_set = mlx5_esw_devlink_rate_node_tc_bw_set,
.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5b0d03b3efe8..0dd3bc0f4caa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -84,9 +84,10 @@ struct page_pool;
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
-#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
-#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
-#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
+#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT (6)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
+#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE BIT(MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -278,10 +279,6 @@ enum packet_merge {
struct mlx5e_packet_merge_param {
enum packet_merge type;
u32 timeout;
- struct {
- u8 match_criteria_type;
- u8 alignment_granularity;
- } shampo;
};
struct mlx5e_params {
@@ -378,7 +375,7 @@ struct mlx5e_sq_dma {
enum mlx5e_dma_map_type type;
};
-/* Keep this enum consistent with with the corresponding strings array
+/* Keep this enum consistent with the corresponding strings array
* declared in en/reporter_tx.c
*/
enum {
@@ -387,7 +384,6 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_DIM,
- MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
@@ -557,7 +553,7 @@ struct mlx5e_icosq {
} ____cacheline_aligned_in_smp;
struct mlx5e_frag_page {
- struct page *page;
+ netmem_ref netmem;
u16 frags;
};
@@ -634,15 +630,13 @@ struct mlx5e_dma_info {
};
struct mlx5e_shampo_hd {
- u32 mkey;
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
u16 hd_per_wqe;
- u16 pages_per_wq;
unsigned long *bitmap;
u16 pi;
u16 ci;
- __be32 key;
+ __be32 mkey_be;
};
struct mlx5e_hw_gro_data {
@@ -721,13 +715,18 @@ struct mlx5e_rq {
struct bpf_prog __rcu *xdp_prog;
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
+
+ /* page pools */
struct page_pool *page_pool;
+ struct page_pool *hd_page_pool;
+
struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
struct xsk_buff_pool *xsk_pool;
struct work_struct recover_work;
+ struct work_struct rx_timeout_work;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -922,6 +921,8 @@ struct mlx5e_priv {
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
+ struct mlx5e_pcie_cong_event *cong_event;
+
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index b5c3a2a9d2a5..9560fcba643f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -18,7 +18,8 @@ enum {
enum {
MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
+ MLX5E_PROMISC_PRIO,
+ MLX5E_NIC_PRIO,
};
struct mlx5e_flow_table {
@@ -68,9 +69,13 @@ struct mlx5e_l2_table {
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-/* NIC prio FTS */
+/* NIC promisc FT level */
enum {
MLX5E_PROMISC_FT_LEVEL,
+};
+
+/* NIC prio FTS */
+enum {
MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
index 9e276fd3c0cf..c21fe36527a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -11,6 +11,11 @@ int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc);
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
@@ -20,6 +25,15 @@ static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int
+mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
+{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 58ec5e44aa7a..3cca06a74cf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -414,25 +414,10 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
{
- return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
-}
-
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
-}
-
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
-
- return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+ return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
+ params->sw_mtu));
}
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
@@ -834,13 +819,12 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
- int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
 /* +1 is for the case that the pkt_per_rsrv doesn't consume the reservation
* so we get a filler cqe for the rest of the reservation.
@@ -901,6 +885,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 lro_timeout;
int ndsegs = 1;
int err;
@@ -926,22 +911,27 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
- MLX5_SET(wq, wq, shampo_enable, true);
- MLX5_SET(wq, wq, log_reservation_size,
- mlx5e_shampo_get_log_rsrv_size(mdev, params));
- MLX5_SET(wq, wq,
- log_max_num_of_packets_per_reservation,
- mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
- MLX5_SET(wq, wq, log_headers_entry_size,
- mlx5e_shampo_get_log_hd_entry_size(mdev, params));
- MLX5_SET(rqc, rqc, reservation_timeout,
- mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
- MLX5_SET(rqc, rqc, shampo_match_criteria_type,
- params->packet_merge.shampo.match_criteria_type);
- MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
- params->packet_merge.shampo.alignment_granularity);
- }
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
+ break;
+
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
+ MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
+ MLX5_SET(wq, wq,
+ log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
+ MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
+ lro_timeout =
+ mlx5e_choose_lro_timeout(mdev,
+ MLX5E_DEFAULT_SHAMPO_TIMEOUT);
+ MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -1044,18 +1034,17 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
- int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
u32 hd_per_wqe;
/* Assumption: hd_per_wqe % 8 == 0. */
- hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
- mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
- __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+ hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
+ mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
+ __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
return hd_per_wqe;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index bd5877acc5b1..488ccdbc1e2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -95,12 +95,6 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
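
The shift-based SHAMPO constants introduced above encode the same values the removed helpers computed at run time; a standalone check of the arithmetic, with plain numbers standing in for the driver macros:

#include <linux/bits.h>
#include <linux/build_bug.h>

/* 64 KiB reservation expressed in the 4 KiB base units the RQ context
 * expects: 16 - 12 = 4, the value the removed
 * mlx5e_shampo_get_log_rsrv_size() returned as order_base_2(64K / 4K).
 * For a 1500 byte MTU, mlx5e_shampo_get_log_pkt_per_rsrv() now gives
 * order_base_2(DIV_ROUND_UP(65536, 1500)) = order_base_2(44) = 6.
 */
static_assert(BIT(16) == 64 * 1024);	/* MLX5E_SHAMPO_WQ_RESRV_SIZE */
static_assert(16 - 12 == 4);		/* log_reservation_size field */
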
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
new file mode 100644
index 000000000000..0ed017569a19
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES.
+
+#include "en.h"
+#include "pcie_cong_event.h"
+
+#define MLX5E_CONG_HIGH_STATE 0x7
+
+enum {
+ MLX5E_INBOUND_CONG = BIT(0),
+ MLX5E_OUTBOUND_CONG = BIT(1),
+};
+
+struct mlx5e_pcie_cong_thresh {
+ u16 inbound_high;
+ u16 inbound_low;
+ u16 outbound_high;
+ u16 outbound_low;
+};
+
+struct mlx5e_pcie_cong_stats {
+ u32 pci_bw_inbound_high;
+ u32 pci_bw_inbound_low;
+ u32 pci_bw_outbound_high;
+ u32 pci_bw_outbound_low;
+};
+
+struct mlx5e_pcie_cong_event {
+ u64 obj_id;
+
+ struct mlx5e_priv *priv;
+
+ /* For event notifier and workqueue. */
+ struct work_struct work;
+ struct mlx5_nb nb;
+
+ /* Stores last read state. */
+ u8 state;
+
+ /* For ethtool stats group. */
+ struct mlx5e_pcie_cong_stats stats;
+};
+
+/* In units of 0.01 % */
+static const struct mlx5e_pcie_cong_thresh default_thresh_config = {
+ .inbound_high = 9000,
+ .inbound_low = 7500,
+ .outbound_high = 9000,
+ .outbound_low = 7500,
+};
+
+static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_inbound_high) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_inbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_outbound_high) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_outbound_low) },
+};
+
+#define NUM_PCIE_CONG_COUNTERS ARRAY_SIZE(mlx5e_pcie_cong_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie_cong)
+{
+ return priv->cong_event ? NUM_PCIE_CONG_COUNTERS : 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie_cong) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie_cong)
+{
+ if (!priv->cong_event)
+ return;
+
+ for (int i = 0; i < NUM_PCIE_CONG_COUNTERS; i++)
+ ethtool_puts(data, mlx5e_pcie_cong_stats_desc[i].format);
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie_cong)
+{
+ if (!priv->cong_event)
+ return;
+
+ for (int i = 0; i < NUM_PCIE_CONG_COUNTERS; i++) {
+ u32 ctr = MLX5E_READ_CTR32_CPU(&priv->cong_event->stats,
+ mlx5e_pcie_cong_stats_desc,
+ i);
+
+ mlx5e_ethtool_put_stat(data, ctr);
+ }
+}
+
+MLX5E_DEFINE_STATS_GRP(pcie_cong, 0);
+
+static int
+mlx5_cmd_pcie_cong_event_set(struct mlx5_core_dev *dev,
+ const struct mlx5e_pcie_cong_thresh *config,
+ u64 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *cong_obj;
+ void *hdr;
+ int err;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+ cong_obj = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, cong_obj);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj, inbound_event_en, 1);
+ MLX5_SET(pcie_cong_event_obj, cong_obj, outbound_event_en, 1);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ inbound_cong_high_threshold, config->inbound_high);
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ inbound_cong_low_threshold, config->inbound_low);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ outbound_cong_high_threshold, config->outbound_high);
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ outbound_cong_low_threshold, config->outbound_low);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ mlx5_core_dbg(dev, "PCIe congestion event (obj_id=%llu) created. Config: in: [%u, %u], out: [%u, %u]\n",
+ *obj_id,
+ config->inbound_high, config->inbound_low,
+ config->outbound_high, config->outbound_low);
+
+ return 0;
+}
+
+static int mlx5_cmd_pcie_cong_event_destroy(struct mlx5_core_dev *dev,
+ u64 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *hdr;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, obj_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_pcie_cong_event_query(struct mlx5_core_dev *dev,
+ u64 obj_id,
+ u32 *state)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(pcie_cong_event_cmd_out)];
+ void *obj;
+ void *hdr;
+ u8 cong;
+ int err;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, obj_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ obj = MLX5_ADDR_OF(pcie_cong_event_cmd_out, out, cong_obj);
+
+ if (state) {
+ cong = MLX5_GET(pcie_cong_event_obj, obj, inbound_cong_state);
+ if (cong == MLX5E_CONG_HIGH_STATE)
+ *state |= MLX5E_INBOUND_CONG;
+
+ cong = MLX5_GET(pcie_cong_event_obj, obj, outbound_cong_state);
+ if (cong == MLX5E_CONG_HIGH_STATE)
+ *state |= MLX5E_OUTBOUND_CONG;
+ }
+
+ return 0;
+}
+
+static void mlx5e_pcie_cong_event_work(struct work_struct *work)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+ struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
+ u32 new_cong_state = 0;
+ u32 changes;
+ int err;
+
+ cong_event = container_of(work, struct mlx5e_pcie_cong_event, work);
+ priv = cong_event->priv;
+ dev = priv->mdev;
+
+ err = mlx5_cmd_pcie_cong_event_query(dev, cong_event->obj_id,
+ &new_cong_state);
+ if (err) {
+ mlx5_core_warn(dev, "Error %d when querying PCIe cong event object (obj_id=%llu).\n",
+ err, cong_event->obj_id);
+ return;
+ }
+
+ changes = cong_event->state ^ new_cong_state;
+ if (!changes)
+ return;
+
+ cong_event->state = new_cong_state;
+
+ if (changes & MLX5E_INBOUND_CONG) {
+ if (new_cong_state & MLX5E_INBOUND_CONG)
+ cong_event->stats.pci_bw_inbound_high++;
+ else
+ cong_event->stats.pci_bw_inbound_low++;
+ }
+
+ if (changes & MLX5E_OUTBOUND_CONG) {
+ if (new_cong_state & MLX5E_OUTBOUND_CONG)
+ cong_event->stats.pci_bw_outbound_high++;
+ else
+ cong_event->stats.pci_bw_outbound_low++;
+ }
+}
+
+static int mlx5e_pcie_cong_event_handler(struct notifier_block *nb,
+ unsigned long event, void *eqe)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+
+ cong_event = mlx5_nb_cof(nb, struct mlx5e_pcie_cong_event, nb);
+ queue_work(cong_event->priv->wq, &cong_event->work);
+
+ return NOTIFY_OK;
+}
+
+int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(mdev))
+ return 0;
+
+ cong_event = kvzalloc_node(sizeof(*cong_event), GFP_KERNEL,
+ mdev->priv.numa_node);
+ if (!cong_event)
+ return -ENOMEM;
+
+ INIT_WORK(&cong_event->work, mlx5e_pcie_cong_event_work);
+ MLX5_NB_INIT(&cong_event->nb, mlx5e_pcie_cong_event_handler,
+ OBJECT_CHANGE);
+
+ cong_event->priv = priv;
+
+ err = mlx5_cmd_pcie_cong_event_set(mdev, &default_thresh_config,
+ &cong_event->obj_id);
+ if (err) {
+ mlx5_core_warn(mdev, "Error creating a PCIe congestion event object\n");
+ goto err_free;
+ }
+
+ err = mlx5_eq_notifier_register(mdev, &cong_event->nb);
+ if (err) {
+ mlx5_core_warn(mdev, "Error registering notifier for the PCIe congestion event\n");
+ goto err_obj_destroy;
+ }
+
+ priv->cong_event = cong_event;
+
+ return 0;
+
+err_obj_destroy:
+ mlx5_cmd_pcie_cong_event_destroy(mdev, cong_event->obj_id);
+err_free:
+ kvfree(cong_event);
+
+ return err;
+}
+
+void mlx5e_pcie_cong_event_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_cong_event *cong_event = priv->cong_event;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!cong_event)
+ return;
+
+ priv->cong_event = NULL;
+
+ mlx5_eq_notifier_unregister(mdev, &cong_event->nb);
+ cancel_work_sync(&cong_event->work);
+
+ if (mlx5_cmd_pcie_cong_event_destroy(mdev, cong_event->obj_id))
+ mlx5_core_warn(mdev, "Error destroying PCIe congestion event (obj_id=%llu)\n",
+ cong_event->obj_id);
+
+ kvfree(cong_event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h
new file mode 100644
index 000000000000..b1ea46bf648a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_PCIE_CONG_EVENT_H__
+#define __MLX5_PCIE_CONG_EVENT_H__
+
+int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv);
+void mlx5e_pcie_cong_event_cleanup(struct mlx5e_priv *priv);
+
+#endif /* __MLX5_PCIE_CONG_EVENT_H__ */
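
A standalone restatement of the transition accounting performed by mlx5e_pcie_cong_event_work() above; the struct and helper names are assumptions, but the logic mirrors the handler: only bits that toggled since the last query bump a counter, so each threshold crossing is counted exactly once.

#include <linux/bits.h>
#include <linux/types.h>

struct cong_counters {
	u32 inbound_high, inbound_low;
	u32 outbound_high, outbound_low;
};

static void account_cong_transitions(struct cong_counters *c,
				     u32 old_state, u32 new_state)
{
	u32 changes = old_state ^ new_state;

	if (changes & BIT(0)) {		/* inbound crossed a threshold */
		if (new_state & BIT(0))
			c->inbound_high++;
		else
			c->inbound_low++;
	}
	if (changes & BIT(1)) {		/* outbound crossed a threshold */
		if (new_state & BIT(1))
			c->outbound_high++;
		else
			c->outbound_low++;
	}
}
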
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 8e25f4ef5ccc..5ae787656a7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -331,6 +331,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
+ /* RO bits should be set to 0 on write */
+ MLX5_SET(pbmc_reg, in, port_buffer_size, 0);
+
err = mlx5e_port_set_pbmc(mdev, in);
out:
kfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 5d0014129a7e..391b4e9c9dc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -340,8 +340,6 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
sq->stats = &c->priv->ptp_stats.sq[tc];
sq->ptpsq = ptpsq;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
- if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index f0744a45db92..4e461cb03b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -374,7 +374,7 @@ void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_que
void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
if (!qdisc)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index e75759533ae0..16c44d628eda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -170,16 +170,23 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
struct mlx5_eq_comp *eq;
+ struct mlx5e_priv *priv;
struct mlx5e_rq *rq;
int err;
rq = ctx;
+ priv = rq->priv;
+
+ mutex_lock(&priv->state_lock);
+
eq = rq->cq.mcq.eq;
err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
if (err && rq->icosq)
clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
+ mutex_unlock(&priv->state_lock);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index c3bda4612fa9..85d5cb39b107 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -13,7 +13,6 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_RECOVERING] = "recovering",
[MLX5E_SQ_STATE_IPSEC] = "ipsec",
[MLX5E_SQ_STATE_DIM] = "dim",
- [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
};
@@ -312,6 +311,30 @@ out:
mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static void
+mlx5e_tx_reporter_diagnose_tis_config(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ u8 num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ u32 tc, i, tisn;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "TIS Config");
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+ for (tc = 0; tc < num_tc; tc++) {
+ tisn = mlx5e_profile_get_tisn(priv->mdev, priv,
+ priv->profile, i, tc);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "lag port", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ devlink_fmsg_u32_pair_put(fmsg, "tisn", tisn);
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+ }
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
@@ -327,6 +350,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
goto unlock;
mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
+ mlx5e_tx_reporter_diagnose_tis_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 74cd111ee320..c68ba0e58fa6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -567,7 +567,8 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
+void mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -582,8 +583,6 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bo
if (symmetric)
*symmetric = rss->hash.symmetric;
-
- return 0;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index 8ac902190010..c6c1b2847cf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -47,7 +47,8 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
+void mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 5fcbe47337b0..a2acbfee2b77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -71,17 +71,12 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return 0;
}
-int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss *rss;
- int i;
-
- for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
- if (!res->rss[i])
- break;
- if (i == MLX5E_MAX_NUM_RSS)
+ if (WARN_ON_ONCE(res->rss[rss_idx]))
return -ENOSPC;
rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
@@ -97,8 +92,7 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
- res->rss[i] = rss;
- *rss_idx = i;
+ res->rss[rss_idx] = rss;
return 0;
}
@@ -193,19 +187,17 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}
-int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
+void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
- struct mlx5e_rss *rss;
-
- if (rss_idx >= MLX5E_MAX_NUM_RSS)
- return -EINVAL;
+ struct mlx5e_rss *rss = NULL;
- rss = res->rss[rss_idx];
- if (!rss)
- return -ENOENT;
+ if (rss_idx < MLX5E_MAX_NUM_RSS)
+ rss = res->rss[rss_idx];
+ if (WARN_ON_ONCE(!rss))
+ return;
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
+ mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -579,8 +571,6 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
for (ix = 0; ix < nch; ix++)
mlx5e_rx_res_channel_activate_direct(res, chs, ix);
- for (ix = nch; ix < res->max_nch; ix++)
- mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
@@ -603,7 +593,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
- for (ix = 0; ix < res->max_nch; ix++)
+ for (ix = 0; ix < res->rss_nch; ix++)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 3e09d91281af..1d049e2aa264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -48,8 +48,9 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
-int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
+void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
const u32 *indir, const u8 *key, const u8 *hfunc,
const bool *symmetric);
@@ -61,7 +62,7 @@ int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch);
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index a13c5e707b83..9bdb5820c553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -94,29 +94,30 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct netlink_ext_ack *extack)
{
- struct net_device *vlan_dev = *out_dev;
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_PUSH,
- .vlan.vid = vlan_dev_vlan_id(vlan_dev),
- .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
- .vlan.prio = 0,
- };
- int err;
-
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
- if (err)
- return err;
-
- rcu_read_lock();
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
- rcu_read_unlock();
- if (!*out_dev)
- return -ENODEV;
+ do {
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr,
+ &attr->action, extack, NULL);
+ if (err)
+ return err;
- if (is_vlan_dev(*out_dev))
- err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+ dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+ } while (is_vlan_dev(*out_dev));
- return err;
+ return 0;
}
int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 81332cd4a582..870d12364f99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1195,6 +1195,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct flow_action_entry *meta_action;
unsigned long cookie = flow->cookie;
struct mlx5_ct_entry *entry;
+ bool has_nat;
int err;
meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
@@ -1236,6 +1237,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
if (err)
goto err_set;
+ has_nat = memcmp(&entry->tuple, &entry->tuple_nat,
+ sizeof(entry->tuple));
spin_lock_bh(&ct_priv->ht_lock);
@@ -1244,7 +1247,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_entries;
- if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+ if (has_nat) {
err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index e837c21d3d21..5dc04bbfc71b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -322,14 +322,24 @@ mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
}
static inline void
-mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
- enum mlx5e_dma_map_type map_type)
+mlx5e_dma_push_single(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size)
{
struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
dma->addr = addr;
dma->size = size;
- dma->type = map_type;
+ dma->type = MLX5E_DMA_MAP_SINGLE;
+}
+
+static inline void
+mlx5e_dma_push_netmem(struct mlx5e_txqsq *sq, netmem_ref netmem,
+ dma_addr_t addr, u32 size)
+{
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
+
+ netmem_dma_unmap_addr_set(netmem, dma, addr, addr);
+ dma->size = size;
+ dma->type = MLX5E_DMA_MAP_PAGE;
}
static inline
@@ -362,7 +372,8 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
case MLX5E_DMA_MAP_PAGE:
- dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size,
+ DMA_TO_DEVICE, 0);
break;
default:
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 5ce1b463b7a8..5d51600935a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -710,7 +710,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* No need to check page_pool_page_is_pp() as we
* know this is a page_pool page.
*/
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,
+ page);
} while (++n < num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 77f61cd28a79..00e77c71e201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -36,6 +36,7 @@
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>
+#include <net/ipv6_stubs.h>
#include "en.h"
#include "eswitch.h"
@@ -259,9 +260,15 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec_addr *addrs = &attrs->addrs;
struct net_device *netdev = sa_entry->dev;
+ struct xfrm_state *x = sa_entry->x;
+ struct dst_entry *rt_dst_entry;
+ struct flowi4 fl4 = {};
+ struct flowi6 fl6 = {};
struct neighbour *n;
u8 addr[ETH_ALEN];
+ struct rtable *rt;
const void *pkey;
u8 *dst, *src;
@@ -274,18 +281,89 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->addrs.saddr.a4;
+
+ switch (addrs->family) {
+ case AF_INET:
+ fl4.flowi4_proto = x->sel.proto;
+ fl4.daddr = addrs->saddr.a4;
+ fl4.saddr = addrs->daddr.a4;
+ pkey = &addrs->saddr.a4;
+ break;
+ case AF_INET6:
+ fl6.flowi6_proto = x->sel.proto;
+ memcpy(fl6.daddr.s6_addr32, addrs->saddr.a6, 16);
+ memcpy(fl6.saddr.s6_addr32, addrs->daddr.a6, 16);
+ pkey = &addrs->saddr.a6;
+ break;
+ default:
+ return;
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->addrs.daddr.a4;
+ switch (addrs->family) {
+ case AF_INET:
+ fl4.flowi4_proto = x->sel.proto;
+ fl4.daddr = addrs->daddr.a4;
+ fl4.saddr = addrs->saddr.a4;
+ pkey = &addrs->daddr.a4;
+ break;
+ case AF_INET6:
+ fl6.flowi6_proto = x->sel.proto;
+ memcpy(fl6.daddr.s6_addr32, addrs->daddr.a6, 16);
+ memcpy(fl6.saddr.s6_addr32, addrs->saddr.a6, 16);
+ pkey = &addrs->daddr.a6;
+ break;
+ default:
+ return;
+ }
break;
default:
return;
}
ether_addr_copy(src, addr);
+
+ /* Destination can refer to a routed network, so perform FIB lookup
+ * to resolve nexthop and get its MAC. Neighbour resolution is used as
+ * fallback.
+ */
+ switch (addrs->family) {
+ case AF_INET:
+ rt = ip_route_output_key(dev_net(netdev), &fl4);
+ if (IS_ERR(rt))
+ goto neigh;
+
+ if (rt->rt_type != RTN_UNICAST) {
+ ip_rt_put(rt);
+ goto neigh;
+ }
+ rt_dst_entry = &rt->dst;
+ break;
+ case AF_INET6:
+ rt_dst_entry = ipv6_stub->ipv6_dst_lookup_flow(
+ dev_net(netdev), NULL, &fl6, NULL);
+ if (IS_ERR(rt_dst_entry))
+ goto neigh;
+ break;
+ default:
+ return;
+ }
+
+ n = dst_neigh_lookup(rt_dst_entry, pkey);
+ if (!n) {
+ dst_release(rt_dst_entry);
+ goto neigh;
+ }
+
+ neigh_ha_snapshot(addr, n, netdev);
+ ether_addr_copy(dst, addr);
+ dst_release(rt_dst_entry);
+ neigh_release(n);
+ return;
+
+neigh:
n = neigh_lookup(&arp_tbl, pkey, netdev);
if (!n) {
n = neigh_create(&arp_tbl, pkey, netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 820debf3fbbf..ef7322d381af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -42,8 +42,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
- (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
- is_mdev_legacy_mode(mdev)))) {
+ is_mdev_legacy_mode(mdev))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 727fa7c18523..6056106edcc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -327,6 +327,10 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
if (unlikely(!sa_entry)) {
rcu_read_unlock();
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ /* Clear secpath to prevent invalid dereference
+ * in downstream XFRM policy checks.
+ */
+ secpath_reset(skb);
return;
}
xfrm_state_hold(sa_entry->x);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 3db31cc10719..08f06984407b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -744,7 +744,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+ mlx5e_dma_push_netmem(sq, skb_frag_netmem(frag), dma_addr, fsz);
tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index 298bb74ec5e9..d1d629697e28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -113,7 +113,7 @@ int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
} else {
__clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
-
+ synchronize_net();
mlx5e_dim_disable(rq->dim);
rq->dim = NULL;
}
@@ -140,7 +140,7 @@ int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
__set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
} else {
__clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
-
+ synchronize_net();
mlx5e_dim_disable(sq->dim);
sq->dim = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3cb8d3bf9044..d507366d773e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
+#include <net/netdev_queues.h>
#include "en.h"
#include "en/channels.h"
@@ -365,11 +366,6 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
-
- kernel_param->tcp_data_split =
- (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
- ETHTOOL_TCP_DATA_SPLIT_ENABLED :
- ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -382,6 +378,27 @@ static void mlx5e_get_ringparam(struct net_device *dev,
mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
+static bool mlx5e_ethtool_set_tcp_data_split(struct mlx5e_priv *priv,
+ u8 tcp_data_split,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->netdev;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(dev->features & NETIF_F_GRO_HW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TCP-data-split is not supported when GRO HW is disabled");
+ return false;
+ }
+
+ /* Might need to disable HW-GRO if it was kept on due to hds. */
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED &&
+ dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ netdev_update_features(priv->netdev);
+
+ return true;
+}
+
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct netlink_ext_ack *extack)
@@ -440,6 +457,11 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (!mlx5e_ethtool_set_tcp_data_split(priv,
+ kernel_param->tcp_data_split,
+ extack))
+ return -EINVAL;
+
return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
@@ -1458,70 +1480,144 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
bool symmetric;
- int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc, &symmetric);
+ mlx5e_rx_res_rss_get_rxfh(priv->rx_res, 0, rxfh->indir, rxfh->key,
+ &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- if (err)
- return err;
-
if (symmetric)
rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
return 0;
}
-static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_param *rxfh)
{
- bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
- struct mlx5e_priv *priv = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- u8 hfunc = rxfh->hfunc;
unsigned int count;
- int err;
-
- mutex_lock(&priv->state_lock);
count = priv->channels.params.num_channels;
- if (hfunc == ETH_RSS_HASH_XOR) {
+ if (rxfh->hfunc == ETH_RSS_HASH_XOR) {
unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
if (count > xor8_max_channels) {
- err = -EINVAL;
netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
__func__, count, xor8_max_channels);
- goto unlock;
+ return -EINVAL;
}
}
- if (*rss_context && rxfh->rss_delete) {
- err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ if (err)
goto unlock;
- }
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
- if (err)
- goto unlock;
- }
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
+ rxfh->indir, rxfh->key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5e_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ if (err)
+ goto unlock;
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
+ err = mlx5e_rx_res_rss_init(priv->rx_res, rxfh->rss_context,
+ priv->channels.params.num_channels);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
rxfh->indir, rxfh->key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+ if (err)
+ goto unlock;
+
+ mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx),
+ ethtool_rxfh_context_key(ctx),
+ &ctx->hfunc, &symmetric);
+ if (symmetric)
+ ctx->input_xfrm = RXH_XFRM_SYM_OR_XOR;
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+static int mlx5e_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
+ rxfh->indir, rxfh->key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5e_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_rx_res_rss_destroy(priv->rx_res, rss_context);
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -2377,6 +2473,23 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
+static int mlx5e_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
+static int mlx5e_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
@@ -2615,13 +2728,15 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_link_lanes_supported = true,
- .cap_rss_ctx_supported = true,
+ .rxfh_per_ctx_fields = true,
.rxfh_per_ctx_key = true,
+ .rxfh_max_num_contexts = MLX5E_MAX_NUM_RSS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
.supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
@@ -2642,6 +2757,11 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_rxfh_fields = mlx5e_get_rxfh_fields,
+ .set_rxfh_fields = mlx5e_set_rxfh_fields,
+ .create_rxfh_context = mlx5e_create_rxfh_context,
+ .modify_rxfh_context = mlx5e_modify_rxfh_context,
+ .remove_rxfh_context = mlx5e_remove_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 04a969128161..265c4ca85f7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -780,7 +780,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.prio = MLX5E_PROMISC_PRIO;
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d68230a7b9f4..79916f1abd14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -894,17 +894,17 @@ static int flow_type_to_traffic_type(u32 flow_type)
}
}
-static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
u8 rx_hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int err;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -941,16 +941,15 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
return err;
}
-static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
{
int hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -986,9 +985,6 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
- case ETHTOOL_SRXFH:
- err = mlx5e_set_rss_hash_opt(priv, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1013,9 +1009,6 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = mlx5e_get_rss_hash_opt(priv, info);
- break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea822c69d137..21bb88c5d3dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@
#include <linux/filter.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
@@ -75,10 +76,12 @@
#include "en/trap.h"
#include "lib/devcom.h"
#include "lib/sd.h"
+#include "en/pcie_cong_event.h"
static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
{
- if (!MLX5_CAP_GEN(mdev, shampo))
+ if (!MLX5_CAP_GEN(mdev, shampo) ||
+ !MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge))
return false;
/* Our HW-GRO implementation relies on "KSM Mkey" for
@@ -331,47 +334,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-{
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
- return 0;
-}
-
-static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo);
-}
-
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
- node);
- shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->pages)
- goto err_nomem;
-
- return 0;
-
-err_nomem:
- bitmap_free(shampo->bitmap);
- kvfree(shampo->pages);
-
- return -ENOMEM;
-}
-
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-{
- bitmap_free(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->pages);
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -584,19 +546,26 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- struct mlx5e_rq *rq)
+ u16 hd_per_wq, __be32 *umr_mkey)
{
u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 mkey;
+ int err;
- if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ if (max_ksm_size < hd_per_wq) {
mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
+ max_ksm_size, hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
- MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- &rq->mpwqe.shampo->mkey);
+ err = mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
+ &mkey);
+ if (err)
+ return err;
+
+ *umr_mkey = cpu_to_be32(mkey);
+ return 0;
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -707,6 +676,27 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_rq_cqe_err(rq);
}
+static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
+{
+ struct mlx5e_rq *rq = container_of(timeout_work,
+ struct mlx5e_rq,
+ rx_timeout_work);
+
+ /* Acquire netdev instance lock to synchronize with channel close and
+ * reopen flows. Either successfully obtain the lock, or detect that
+ * channels are closing for another reason, making this work no longer
+ * necessary.
+ */
+ while (!netdev_trylock(rq->netdev)) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
+ return;
+ msleep(20);
+ }
+
+ mlx5e_reporter_rx_timeout(rq);
+ netdev_unlock(rq->netdev);
+}
+
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
@@ -758,6 +748,42 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
+ int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->hd_per_wq = hd_per_wq;
+
+ shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
+ shampo->pages = kvzalloc_node(array_size(hd_per_wq,
+ sizeof(*shampo->pages)),
+ GFP_KERNEL, node);
+ if (!shampo->bitmap || !shampo->pages)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ kvfree(shampo->pages);
+ bitmap_free(shampo->bitmap);
+
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->pages);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
+}
+
+static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
+{
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
+
+ return !!rxq->mp_params.mp_ops;
+}
+
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -765,42 +791,80 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
u32 *pool_size,
int node)
{
+ void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u32 hd_pool_size;
+ u16 hd_per_wq;
+ int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- err = mlx5e_rq_shampo_hd_alloc(rq, node);
- if (err)
- goto out;
- rq->mpwqe.shampo->hd_per_wq =
- mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+
+ /* split headers data structures */
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
if (err)
- goto err_shampo_hd;
- err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ goto err_shampo_hd_info_alloc;
+
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
+ &rq->mpwqe.shampo->mkey_be);
if (err)
- goto err_shampo_info;
+ goto err_umr_mkey;
+
+ rq->mpwqe.shampo->hd_per_wqe =
+ mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+ if (mlx5_rq_needs_separate_hd_pool(rq)) {
+ /* Separate page pool for shampo headers */
+ struct page_pool_params pp_params = { };
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = hd_pool_size;
+ pp_params.nid = node;
+ pp_params.dev = rq->pdev;
+ pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
+ pp_params.dma_dir = rq->buff.map_dir;
+ pp_params.max_len = PAGE_SIZE;
+
+ rq->hd_page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->hd_page_pool)) {
+ err = PTR_ERR(rq->hd_page_pool);
+ rq->hd_page_pool = NULL;
+ goto err_hds_page_pool;
+ }
+ } else {
+ /* Common page pool, reserve space for headers. */
+ *pool_size += hd_pool_size;
+ rq->hd_page_pool = NULL;
+ }
+
+ /* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
goto err_hw_gro_data;
}
- rq->mpwqe.shampo->key =
- cpu_to_be32(rq->mpwqe.shampo->mkey);
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- rq->mpwqe.shampo->pages_per_wq =
- rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
- *pool_size += rq->mpwqe.shampo->pages_per_wq;
+
return 0;
err_hw_gro_data:
+ page_pool_destroy(rq->hd_page_pool);
+err_hds_page_pool:
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+err_umr_mkey:
mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_info:
- mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
-err_shampo_hd:
- mlx5e_rq_shampo_hd_free(rq);
-out:
+err_shampo_hd_info_alloc:
+ kvfree(rq->mpwqe.shampo);
return err;
}
@@ -810,9 +874,12 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
return;
kvfree(rq->hw_gro_data);
+ if (rq->hd_page_pool != rq->page_pool)
+ page_pool_destroy(rq->hd_page_pool);
mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
- mlx5e_rq_shampo_hd_free(rq);
+ mlx5_core_destroy_mkey(rq->mdev,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+ kvfree(rq->mpwqe.shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
@@ -830,6 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rqp->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
+ INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
@@ -915,6 +983,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err_free_by_rq_type;
xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
@@ -929,6 +999,11 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
+ pp_params.queue_idx = rq->ix;
+
+ /* Shampo header data split allows for unreadable netmem */
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
/* page_pool can be used even when there is no rq->xdp_prog,
* given page_pool does not handle DMA mapping there is no
@@ -941,12 +1016,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ if (!rq->hd_page_pool)
+ rq->hd_page_pool = rq->page_pool;
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
+ if (err)
+ goto err_destroy_page_pool;
+ }
}
- if (err)
- goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -1074,7 +1152,8 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ MLX5_SET(wq, wq, headers_mkey,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
@@ -1204,7 +1283,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
- mlx5e_reporter_rx_timeout(rq);
+ queue_work(rq->priv->wq, &rq->rx_timeout_work);
+
return -ETIMEDOUT;
}
@@ -1375,6 +1455,7 @@ void mlx5e_close_rq(struct mlx5e_rq *rq)
if (rq->dim)
cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
+ cancel_work_sync(&rq->rx_timeout_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1630,8 +1711,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
- if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
@@ -4043,10 +4122,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
if (enable) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
- new_params.packet_merge.shampo.match_criteria_type =
- MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
- new_params.packet_merge.shampo.alignment_granularity =
- MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
} else {
@@ -4373,6 +4448,7 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
+ struct netdev_config *cfg = netdev->cfg_pending;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
@@ -4439,6 +4515,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ /* The header-data split ring param requires HW GRO to stay enabled. */
+ if (cfg && cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(features & NETIF_F_GRO_HW)) {
+ netdev_warn(netdev, "Keeping HW-GRO enabled, TCP header-data split depends on it\n");
+ features |= NETIF_F_GRO_HW;
+ }
+
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
netdev->netns_immutable = true;
@@ -5303,8 +5386,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
priv->nic_info.set_port = mlx5e_vxlan_set_port;
priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
- priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+ priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
/* Don't count the space hard-coded to the IANA port */
priv->nic_info.tables[0].n_entries =
@@ -5454,6 +5536,103 @@ static const struct netdev_stat_ops mlx5e_stat_ops = {
.get_base_stats = mlx5e_get_base_stats,
};
+struct mlx5_qmgmt_data {
+ struct mlx5e_channel *c;
+ struct mlx5e_channel_param cparam;
+};
+
+static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5e_params params = chs->params;
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (queue_index >= chs->num) {
+ err = -ERANGE;
+ goto unlock;
+ }
+
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) ||
+ chs->params.ptp_rx ||
+ chs->params.xdp_prog ||
+ priv->htb) {
+ netdev_err(priv->netdev,
+ "Cloning channels with Port/rx PTP, XDP or HTB is not supported\n");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, queue_index);
+ err = mlx5e_build_channel_param(mdev, &params, &new->cparam);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_open_channel(priv, queue_index, &params, NULL, &new->c);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static void mlx5e_queue_mem_free(struct net_device *dev, void *mem)
+{
+ struct mlx5_qmgmt_data *data = (struct mlx5_qmgmt_data *)mem;
+
+ /* not supposed to happen since mlx5e_queue_start never fails
+ * but this is how this should be implemented just in case
+ */
+ if (data->c)
+ mlx5e_close_channel(data->c);
+}
+
+static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
+{
+ /* In mlx5 a txq cannot be simply stopped in isolation, only restarted.
+ * mlx5e_queue_start does not fail, we stop the old queue there.
+ * TODO: Improve this.
+ */
+ return 0;
+}
+
+static int mlx5e_queue_start(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel *old;
+
+ mutex_lock(&priv->state_lock);
+
+ /* stop and close the old */
+ old = priv->channels.c[queue_index];
+ mlx5e_deactivate_priv_channels(priv);
+ /* close old before activating new, to avoid napi conflict */
+ mlx5e_close_channel(old);
+
+ /* start the new */
+ priv->channels.c[queue_index] = new->c;
+ mlx5e_activate_priv_channels(priv);
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
+ .ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
+ .ndo_queue_mem_free = mlx5e_queue_mem_free,
+ .ndo_queue_start = mlx5e_queue_start,
+ .ndo_queue_stop = mlx5e_queue_stop,
+};
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -5464,6 +5643,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->queue_mgmt_ops = &mlx5e_queue_mgmt_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
netdev->request_ops_lock = true;
@@ -5506,17 +5686,17 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->vlan_features |= NETIF_F_GRO_HW;
+
netdev->hw_features = netdev->vlan_features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_hw_gro_supported(mdev) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
- MLX5E_MPWRQ_UMR_MODE_ALIGNED))
- netdev->hw_features |= NETIF_F_GRO_HW;
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5595,6 +5775,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netmem_tx = true;
+
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_xdp_feature(netdev);
mlx5e_set_netdev_dev_addr(netdev);
@@ -5841,6 +6023,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
+ mlx5e_pcie_cong_event_init(priv);
mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -5880,6 +6063,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_nic_set_rx_mode(priv);
+ mlx5e_pcie_cong_event_cleanup(priv);
mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
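In the en_main.c changes above, mlx5e_wait_for_min_rx_wqes() no longer calls the RX timeout reporter directly; it queues mlx5e_rq_timeout_work(), which takes the netdev instance lock with a trylock loop so it cannot deadlock against a concurrent channel close. A hedged sketch of that lock-or-bail pattern (function and parameter names are illustrative):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>

/* Either acquire the netdev instance lock, or notice that the channels are
 * being closed anyway (which also cancels this work) and give up.
 */
static bool example_lock_or_bail(struct net_device *dev,
				 unsigned long *state, int active_bit)
{
	while (!netdev_trylock(dev)) {
		if (!test_bit(active_bit, state))
			return false;	/* close in progress, nothing to report */
		msleep(20);
	}
	return true;			/* caller must netdev_unlock(dev) */
}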
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 84b1ab8233b8..218b1a09534c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -273,33 +273,32 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
-static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
+static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
- struct page *page;
+ netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
- page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!page))
+ if (unlikely(!netmem))
return -ENOMEM;
- page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
+ page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
*frag_page = (struct mlx5e_frag_page) {
- .page = page,
+ .netmem = netmem,
.frags = 0,
};
return 0;
}
-static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
+static void mlx5e_page_release_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
- struct page *page = frag_page->page;
+ netmem_ref netmem = frag_page->netmem;
- if (page_pool_unref_page(page, drain_count) == 0)
- page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_netmem(netmem, drain_count) == 0)
+ page_pool_put_unrefed_netmem(pp, netmem, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -313,7 +312,8 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool,
+ frag->frag_page);
return err;
}
@@ -332,7 +332,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
if (mlx5e_frag_can_release(frag))
- mlx5e_page_release_fragmented(rq, frag->frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -358,7 +358,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
headroom = i == 0 ? rq->buff.headroom : 0;
- addr = page_pool_get_dma_addr(frag->frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
@@ -499,9 +499,10 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
u32 frag_offset, u32 len)
{
+ netmem_ref netmem = frag_page->netmem;
skb_frag_t *frag;
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
if (!xdp_buff_has_frags(xdp)) {
@@ -514,9 +515,9 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
}
frag = &sinfo->frags[sinfo->nr_frags++];
- skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
+ skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
- if (page_is_pfmemalloc(frag_page->page))
+ if (netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
sinfo->xdp_frags_size += len;
}
@@ -527,27 +528,29 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
u8 next_frag = skb_shinfo(skb)->nr_frags;
+ netmem_ref netmem = frag_page->netmem;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
+ if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
- } else {
- frag_page->frags++;
- skb_add_rx_frag(skb, next_frag, frag_page->page,
- frag_offset, len, truesize);
+ return;
}
+
+ frag_page->frags++;
+ skb_add_rx_frag_netmem(skb, next_frag, netmem,
+ frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, dma_addr_t addr,
+ netmem_ref netmem, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(page) + offset_from;
+ const void *from = netmem_address(netmem) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
@@ -584,7 +587,8 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
struct mlx5e_frag_page *frag_page;
frag_page = &wi->alloc_units.frag_pages[i];
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ frag_page);
}
}
}
@@ -672,19 +676,18 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries);
+ build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
while (i < ksm_entries) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
u64 addr;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
if (unlikely(err))
goto err_unmap;
-
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
header_offset = mlx5e_shampo_hd_offset(index++);
@@ -715,7 +718,8 @@ err_unmap:
if (!header_offset) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->hd_page_pool,
+ frag_page);
}
}
@@ -791,10 +795,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
dma_addr_t addr;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
if (unlikely(err))
goto err_unmap;
- addr = page_pool_get_dma_addr(frag_page->page);
+
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
@@ -836,7 +841,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
frag_page--;
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag_page);
}
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
@@ -855,7 +860,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
}
clear_bit(header_index, shampo->bitmap);
}
@@ -1100,6 +1105,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
+ if (rq->hd_page_pool)
+ page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
head = rq->mpwqe.actual_wq_head;
i = missing;
@@ -1154,8 +1161,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
}
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
- u32 cqe_bcnt)
+static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
@@ -1205,6 +1213,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
&ipv6->daddr, check);
}
+
+ return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
}
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
@@ -1212,7 +1222,7 @@ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
- return page_address(frag_page->page) + head_offset;
+ return netmem_address(frag_page->netmem) + head_offset;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -1561,8 +1571,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
- skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
@@ -1673,11 +1684,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
dma_addr_t addr;
u32 frag_size;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
@@ -1727,10 +1738,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_page = wi->frag_page;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, rq->buff.map_dir);
net_prefetchw(va); /* xdp_frame data area */
@@ -2003,12 +2014,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
- net_prefetchw(page_address(frag_page->page) + frag_offset);
- if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+ net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+ if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
+ &wi->linear_page))) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = page_address(wi->linear_page.page);
+
+ va = netmem_address(wi->linear_page.netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
@@ -2068,7 +2081,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
wi->linear_page.frags++;
}
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL; /* page/packet was consumed by XDP */
}
@@ -2077,13 +2091,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL;
}
skb_mark_for_recycle(skb);
wi->linear_page.frags++;
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
@@ -2117,8 +2132,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (++pagep < frag_page);
}
/* copy header */
- addr = page_pool_get_dma_addr(head_page->page);
- mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+ addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+ mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -2148,11 +2163,11 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(frag_page->page) + head_offset;
+ va = netmem_address(frag_page->netmem) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
@@ -2191,16 +2206,19 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page);
u16 head_offset = mlx5e_shampo_hd_offset(header_index);
- dma_addr_t dma_addr = page_dma_addr + head_offset;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
+ dma_addr_t page_dma_addr;
+ dma_addr_t dma_addr;
void *hdr, *data;
u32 frag_size;
- hdr = page_address(frag_page->page) + head_offset;
+ page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
+ dma_addr = page_dma_addr + head_offset;
+
+ hdr = netmem_address(frag_page->netmem) + head_offset;
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
@@ -2225,7 +2243,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr,
+ mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2319,11 +2337,23 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- if (likely(head_size))
+ if (likely(head_size)) {
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
- else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
- data_offset, page_idx);
+ } else {
+ struct mlx5e_frag_page *frag_page;
+
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ /* Drop packets with header in unreadable data area to
+ * prevent the kernel from touching it.
+ */
+ if (unlikely(netmem_is_net_iov(frag_page->netmem)))
+ goto free_hd_entry;
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
+ cqe_bcnt,
+ data_offset,
+ page_idx);
+ }
+
if (unlikely(!*skb))
goto free_hd_entry;
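The en_rx.c conversion above keeps the driver's page-count bias trick while moving from struct page to netmem_ref. In outline, using only the page_pool calls that appear in the hunks (the EXAMPLE_* names and helpers below are illustrative, not the driver's own):

#include <linux/errno.h>
#include <linux/types.h>
#include <net/netmem.h>
#include <net/page_pool/helpers.h>

#define EXAMPLE_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)

/* Allocate one netmem and pre-charge it with a large fragment count, so the
 * RX hot path only bumps a local per-buffer counter instead of an atomic
 * refcount for every fragment it carves out of the page.
 */
static int example_alloc_fragmented(struct page_pool *pp, netmem_ref *out)
{
	netmem_ref netmem = page_pool_dev_alloc_netmems(pp);

	if (!netmem)
		return -ENOMEM;
	page_pool_fragment_netmem(netmem, EXAMPLE_PAGECNT_BIAS_MAX);
	*out = netmem;
	return 0;
}

/* On release, drain whatever bias was never consumed; once the fragment
 * count reaches zero the netmem is handed back to the pool.
 */
static void example_release_fragmented(struct page_pool *pp,
				       netmem_ref netmem, u16 used_frags)
{
	u16 drain = EXAMPLE_PAGECNT_BIAS_MAX - used_frags;

	if (page_pool_unref_netmem(netmem, drain) == 0)
		page_pool_put_unrefed_netmem(pp, netmem, -1, true);
}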
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 19664fa7f217..87536f158d07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2612,6 +2612,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
#ifdef CONFIG_MLX5_MACSEC
&MLX5E_STATS_GRP(macsec_hw),
#endif
+ &MLX5E_STATS_GRP(pcie_cong),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index def5dea1463d..72dbcc1928ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -535,5 +535,6 @@ extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(pcie_cong);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fef418e1ed1a..32c07a8b03d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -5446,7 +5446,7 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
- err = dev_get_port_parent_id(priv->netdev, &ppid, false);
+ err = netif_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
memcpy(&key, &ppid.id, sizeof(key));
mlx5_esw_offloads_devcom_init(esw, key);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 55a8629f0792..319061d31602 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -196,7 +196,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(headlen);
- mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
+ mlx5e_dma_push_single(sq, dma_addr, headlen);
num_dma++;
dseg++;
}
@@ -214,7 +214,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+ mlx5e_dma_push_netmem(sq, skb_frag_netmem(frag), dma_addr, fsz);
num_dma++;
dseg++;
}
@@ -256,8 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mode = sq->min_inline_mode;
- if (skb_vlan_tag_present(skb) &&
- test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ if (skb_vlan_tag_present(skb))
mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
return mode;
@@ -483,12 +482,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
eseg->inline_hdr.sz |= cpu_to_be16(ihs);
dseg += wqe_attr->ds_cnt_inl;
- } else if (skb_vlan_tag_present(skb)) {
- eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
- if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
- eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
- eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
- stats->added_vlan_packets++;
}
dseg += wqe_attr->ds_cnt_ids;
@@ -623,7 +616,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
- mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
+ mlx5e_dma_push_single(sq, txd.dma_addr, txd.len);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
mlx5e_tx_skb_update_ts_flags(skb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index dfb079e59d85..1ab77159409d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -585,6 +585,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+ if (mlx5_pcie_cong_event_supported(dev))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
@@ -873,19 +876,25 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct irq_affinity_desc af_desc = {};
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- /* In case SF irq pool does not exist, fallback to the PF irqs*/
+ /* In case SF irq pool does not exist, fallback to the PF irqs */
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = false;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
- if (IS_ERR(irq))
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return -ENOMEM;
+
+ af_desc->is_managed = false;
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
+ if (IS_ERR(irq)) {
+ kvfree(af_desc);
return PTR_ERR(irq);
+ }
cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -893,6 +902,8 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+ kvfree(af_desc);
+
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
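The eq.c hunk above moves struct irq_affinity_desc off the stack: it embeds a cpumask, which can be large when NR_CPUS is high, so comp_irq_request_sf() now allocates it with kvzalloc() and frees it on every exit path. A small sketch of the same idea (the helper is hypothetical, not part of the driver):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Build an unmanaged affinity descriptor on the heap, seeded with the online
 * CPUs; the caller owns it and must kvfree() it when done.
 */
static struct irq_affinity_desc *example_alloc_online_affinity(void)
{
	struct irq_affinity_desc *af_desc;

	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
	if (!af_desc)
		return NULL;

	af_desc->is_managed = false;
	cpumask_copy(&af_desc->mask, cpu_online_mask);
	return af_desc;
}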
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index b6ae384396b3..91d863c8c152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -64,11 +64,19 @@ static void esw_qos_domain_release(struct mlx5_eswitch *esw)
enum sched_node_type {
SCHED_NODE_TYPE_VPORTS_TSAR,
SCHED_NODE_TYPE_VPORT,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ SCHED_NODE_TYPE_RATE_LIMITER,
+ SCHED_NODE_TYPE_VPORT_TC,
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR,
};
static const char * const sched_node_type_str[] = {
[SCHED_NODE_TYPE_VPORTS_TSAR] = "vports TSAR",
[SCHED_NODE_TYPE_VPORT] = "vport",
+ [SCHED_NODE_TYPE_TC_ARBITER_TSAR] = "TC Arbiter TSAR",
+ [SCHED_NODE_TYPE_RATE_LIMITER] = "Rate Limiter",
+ [SCHED_NODE_TYPE_VPORT_TC] = "vport TC",
+ [SCHED_NODE_TYPE_VPORTS_TC_TSAR] = "vports TC TSAR",
};
struct mlx5_esw_sched_node {
@@ -92,6 +100,8 @@ struct mlx5_esw_sched_node {
struct mlx5_vport *vport;
/* Level in the hierarchy. The root node level is 1. */
u8 level;
+ /* Valid only when this node represents a traffic class. */
+ u8 tc;
};
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
@@ -106,6 +116,13 @@ static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
}
}
+static int esw_qos_num_tcs(struct mlx5_core_dev *dev)
+{
+ int num_tcs = mlx5_max_tc(dev) + 1;
+
+ return num_tcs < DEVLINK_RATE_TCS_MAX ? num_tcs : DEVLINK_RATE_TCS_MAX;
+}
+
static void
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
{
@@ -116,8 +133,38 @@ esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_
esw_qos_node_attach_to_parent(node);
}
+static void esw_qos_nodes_set_parent(struct list_head *nodes,
+ struct mlx5_esw_sched_node *parent)
+{
+ struct mlx5_esw_sched_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, nodes, entry) {
+ esw_qos_node_set_parent(node, parent);
+ if (!list_empty(&node->children) &&
+ parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ struct mlx5_esw_sched_node *child;
+
+ list_for_each_entry(child, &node->children, entry) {
+ struct mlx5_vport *vport = child->vport;
+
+ if (vport)
+ vport->qos.sched_node->parent = parent;
+ }
+ }
+ }
+}
+
void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
{
+ if (vport->qos.sched_nodes) {
+ int num_tcs = esw_qos_num_tcs(vport->qos.sched_node->esw->dev);
+ int i;
+
+ for (i = 0; i < num_tcs; i++)
+ kfree(vport->qos.sched_nodes[i]);
+ kfree(vport->qos.sched_nodes);
+ }
+
kfree(vport->qos.sched_node);
memset(&vport->qos, 0, sizeof(vport->qos));
}
@@ -141,16 +188,37 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport)
static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op)
{
- if (node->vport) {
+ switch (node->type) {
+ case SCHED_NODE_TYPE_VPORTS_TC_TSAR:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (tc=%d,err=%d)\n",
+ op, sched_node_type_str[node->type], node->tc, err);
+ break;
+ case SCHED_NODE_TYPE_VPORT_TC:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (vport=%d,tc=%d,err=%d)\n",
+ op,
+ sched_node_type_str[node->type],
+ node->vport->vport, node->tc, err);
+ break;
+ case SCHED_NODE_TYPE_VPORT:
esw_warn(node->esw->dev,
"E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n",
op, sched_node_type_str[node->type], node->vport->vport, err);
- return;
+ break;
+ case SCHED_NODE_TYPE_RATE_LIMITER:
+ case SCHED_NODE_TYPE_TC_ARBITER_TSAR:
+ case SCHED_NODE_TYPE_VPORTS_TSAR:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (err=%d)\n",
+ op, sched_node_type_str[node->type], err);
+ break;
+ default:
+ esw_warn(node->esw->dev,
+ "E-Switch %s scheduling element failed (err=%d)\n",
+ op, err);
+ break;
}
-
- esw_warn(node->esw->dev,
- "E-Switch %s %s scheduling element failed (err=%d)\n",
- op, sched_node_type_str[node->type], err);
}
static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx,
@@ -233,6 +301,24 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r
return 0;
}
+static int esw_qos_create_rate_limit_element(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+
+ if (!mlx5_qos_element_type_supported(
+ node->esw->dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, node->max_rate);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT);
+
+ return esw_qos_node_create_sched_element(node, sched_ctx, extack);
+}
+
static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
struct mlx5_esw_sched_node *parent)
{
@@ -266,11 +352,13 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
return 0;
}
-static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
+static u32 esw_qos_calc_bw_share(u32 value, u32 divider, u32 fw_max)
{
if (!divider)
return 0;
- return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max);
+ return min_t(u32, fw_max,
+ max_t(u32,
+ DIV_ROUND_UP(value, divider), MLX5_MIN_BW_SHARE));
}
static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node,
@@ -297,7 +385,13 @@ static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)
continue;
- esw_qos_update_sched_node_bw_share(node, divider, extack);
+ /* Vports TC TSARs don't have a minimum rate configured,
+ * so there's no need to update the bw_share on them.
+ */
+ if (node->type != SCHED_NODE_TYPE_VPORTS_TC_TSAR) {
+ esw_qos_update_sched_node_bw_share(node, divider,
+ extack);
+ }
if (list_empty(&node->children))
continue;
@@ -306,6 +400,20 @@ static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
}
}
+static u32 esw_qos_calculate_tc_bw_divider(u32 *tc_bw)
+{
+ u32 total = 0;
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++)
+ total += tc_bw[i];
+
+ /* If total is zero, tc-bw config is disabled and we shouldn't reach
+ * here.
+ */
+ return WARN_ON(!total) ? 1 : total;
+}
+
static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
u32 min_rate, struct netlink_ext_ack *extack)
{
@@ -350,28 +458,64 @@ esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
tsar_ix);
}
-static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
- struct netlink_ext_ack *extack)
+static int
+esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
+ struct netlink_ext_ack *extack)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_node->esw->dev;
void *attr;
- if (!mlx5_qos_element_type_supported(dev,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
- SCHEDULING_HIERARCHY_E_SWITCH))
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix);
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
+ vport_node->parent->ix);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
+ vport_node->max_rate);
return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack);
}
+static int
+esw_qos_vport_tc_create_sched_element(struct mlx5_esw_sched_node *vport_tc_node,
+ u32 rate_limit_elem_ix,
+ struct netlink_ext_ack *extack)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = vport_tc_node->esw->dev;
+ void *attr;
+
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC);
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(vport_tc_element, attr, vport_number,
+ vport_tc_node->vport->vport);
+ MLX5_SET(vport_tc_element, attr, traffic_class, vport_tc_node->tc);
+ MLX5_SET(scheduling_context, sched_ctx, max_bw_obj_id,
+ rate_limit_elem_ix);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
+ vport_tc_node->parent->ix);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share,
+ vport_tc_node->bw_share);
+
+ return esw_qos_node_create_sched_element(vport_tc_node, sched_ctx,
+ extack);
+}
+
static struct mlx5_esw_sched_node *
__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
struct mlx5_esw_sched_node *parent)
@@ -388,6 +532,14 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type
node->parent = parent;
INIT_LIST_HEAD(&node->children);
esw_qos_node_attach_to_parent(node);
+ if (!parent) {
+ /* The caller is responsible for inserting the node into the
+ * parent list if necessary. This function can also be used with
+ * a NULL parent, which doesn't necessarily indicate that it
+ * refers to the root scheduling element.
+ */
+ list_del_init(&node->entry);
+ }
return node;
}
@@ -404,6 +556,149 @@ static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlin
__esw_qos_free_node(node);
}
+static int esw_qos_create_vports_tc_node(struct mlx5_esw_sched_node *parent,
+ u8 tc, struct netlink_ext_ack *extack)
+{
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = parent->esw->dev;
+ struct mlx5_esw_sched_node *vports_tc_node;
+ void *attr;
+ int err;
+
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_E_SWITCH) ||
+ !mlx5_qos_tsar_type_supported(dev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ vports_tc_node = __esw_qos_alloc_node(parent->esw, 0,
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR,
+ parent);
+ if (!vports_tc_node) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed");
+ esw_warn(dev, "Failed to alloc vports TC node (tc=%d)\n", tc);
+ return -ENOMEM;
+ }
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
+ MLX5_SET(tsar_element, attr, traffic_class, tc);
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, parent->ix);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ err = esw_qos_node_create_sched_element(vports_tc_node, tsar_ctx,
+ extack);
+ if (err)
+ goto err_create_sched_element;
+
+ vports_tc_node->tc = tc;
+
+ return 0;
+
+err_create_sched_element:
+ __esw_qos_free_node(vports_tc_node);
+ return err;
+}
+
+static void
+esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
+ u32 *tc_bw)
+{
+ struct mlx5_esw_sched_node *vports_tc_node;
+
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry)
+ tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share;
+}
+
+static void
+esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
+ u32 *tc_bw, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = tc_arbiter_node->esw;
+ struct mlx5_esw_sched_node *vports_tc_node;
+ u32 divider, fw_max_bw_share;
+
+ fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ divider = esw_qos_calculate_tc_bw_divider(tc_bw);
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) {
+ u8 tc = vports_tc_node->tc;
+ u32 bw_share;
+
+ bw_share = tc_bw[tc] * fw_max_bw_share;
+ bw_share = esw_qos_calc_bw_share(bw_share, divider,
+ fw_max_bw_share);
+ esw_qos_sched_elem_config(vports_tc_node, 0, bw_share, extack);
+ }
+}
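
A worked example of the per-TC normalization done above, with illustrative values only (fw_max_bw_share of 100 is an assumption): with tc_bw = {20, 30, 50} the divider is 100, so each class ends up with bw_share = min(100, max(ceil(tc_bw[tc] * 100 / 100), 1)), i.e. 20, 30 and 50. A standalone sketch, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int tc_bw[] = { 20, 30, 50 };	/* illustrative shares */
	unsigned int fw_max = 100;		/* assumed max_tsar_bw_share */
	unsigned int divider = 0, i;

	for (i = 0; i < 3; i++)
		divider += tc_bw[i];		/* 100 */

	for (i = 0; i < 3; i++) {
		unsigned int share = (tc_bw[i] * fw_max + divider - 1) / divider;

		share = share < 1 ? 1 : share;		/* minimum share */
		share = share > fw_max ? fw_max : share;	/* firmware max */
		printf("tc%u -> bw_share %u\n", i, share);	/* 20, 30, 50 */
	}
	return 0;
}
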
+
+static void
+esw_qos_destroy_vports_tc_nodes(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vports_tc_node, *tmp;
+
+ list_for_each_entry_safe(vports_tc_node, tmp,
+ &tc_arbiter_node->children, entry)
+ esw_qos_destroy_node(vports_tc_node, extack);
+}
+
+static int
+esw_qos_create_vports_tc_nodes(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = tc_arbiter_node->esw;
+ int err, i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ err = esw_qos_create_vports_tc_node(tc_arbiter_node, i, extack);
+ if (err)
+ goto err_tc_node_create;
+ }
+
+ return 0;
+
+err_tc_node_create:
+ esw_qos_destroy_vports_tc_nodes(tc_arbiter_node, NULL);
+ return err;
+}
+
+static int esw_qos_create_tc_arbiter_sched_elem(
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ u32 tsar_parent_ix;
+ void *attr;
+
+ if (!mlx5_qos_tsar_type_supported(tc_arbiter_node->esw->dev,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB,
+ SCHEDULING_HIERARCHY_E_SWITCH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch TC Arbiter scheduling element is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_TC_ARB);
+ tsar_parent_ix = tc_arbiter_node->parent ? tc_arbiter_node->parent->ix :
+ tc_arbiter_node->esw->qos.root_tsar_ix;
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ tsar_parent_ix);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw,
+ tc_arbiter_node->max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share,
+ tc_arbiter_node->bw_share);
+
+ return esw_qos_node_create_sched_element(tc_arbiter_node, tsar_ctx,
+ extack);
+}
+
static struct mlx5_esw_sched_node *
__esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
@@ -426,6 +721,7 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
goto err_alloc_node;
}
+ list_add_tail(&node->entry, &esw->qos.domain->nodes);
esw_qos_normalize_min_rate(esw, NULL, extack);
trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix);
@@ -467,6 +763,9 @@ static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netl
{
struct mlx5_eswitch *esw = node->esw;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ esw_qos_destroy_vports_tc_nodes(node, extack);
+
trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
esw_qos_destroy_node(node, extack);
esw_qos_normalize_min_rate(esw, NULL, extack);
@@ -498,6 +797,9 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
SCHED_NODE_TYPE_VPORTS_TSAR,
NULL))
esw->qos.node0 = ERR_PTR(-ENOMEM);
+ else
+ list_add_tail(&esw->qos.node0->entry,
+ &esw->qos.domain->nodes);
}
if (IS_ERR(esw->qos.node0)) {
err = PTR_ERR(esw->qos.node0);
@@ -555,12 +857,239 @@ static void esw_qos_put(struct mlx5_eswitch *esw)
esw_qos_destroy(esw);
}
+static void
+esw_qos_tc_arbiter_scheduling_teardown(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ /* Clean up all Vports TC nodes within the TC arbiter node. */
+ esw_qos_destroy_vports_tc_nodes(node, extack);
+ /* Destroy the scheduling element for the TC arbiter node itself. */
+ esw_qos_node_destroy_sched_element(node, extack);
+}
+
+static int esw_qos_tc_arbiter_scheduling_setup(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 curr_ix = node->ix;
+ int err;
+
+ err = esw_qos_create_tc_arbiter_sched_elem(node, extack);
+ if (err)
+ return err;
+ /* Initialize the vports TC nodes within created TC arbiter TSAR. */
+ err = esw_qos_create_vports_tc_nodes(node, extack);
+ if (err)
+ goto err_vports_tc_nodes;
+
+ node->type = SCHED_NODE_TYPE_TC_ARBITER_TSAR;
+
+ return 0;
+
+err_vports_tc_nodes:
+ /* If initialization fails, clean up the scheduling element
+ * for the TC arbiter node.
+ */
+ esw_qos_node_destroy_sched_element(node, NULL);
+ node->ix = curr_ix;
+ return err;
+}
+
+static int
+esw_qos_create_vport_tc_sched_node(struct mlx5_vport *vport,
+ u32 rate_limit_elem_ix,
+ struct mlx5_esw_sched_node *vports_tc_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *vport_tc_node;
+ u8 tc = vports_tc_node->tc;
+ int err;
+
+ vport_tc_node = __esw_qos_alloc_node(vport_node->esw, 0,
+ SCHED_NODE_TYPE_VPORT_TC,
+ vports_tc_node);
+ if (!vport_tc_node)
+ return -ENOMEM;
+
+ vport_tc_node->min_rate = vport_node->min_rate;
+ vport_tc_node->tc = tc;
+ vport_tc_node->vport = vport;
+ err = esw_qos_vport_tc_create_sched_element(vport_tc_node,
+ rate_limit_elem_ix,
+ extack);
+ if (err)
+ goto err_out;
+
+ vport->qos.sched_nodes[tc] = vport_tc_node;
+
+ return 0;
+err_out:
+ __esw_qos_free_node(vport_tc_node);
+ return err;
+}
+
+static void
+esw_qos_destroy_vport_tc_sched_elements(struct mlx5_vport *vport,
+ struct netlink_ext_ack *extack)
+{
+ int i, num_tcs = esw_qos_num_tcs(vport->qos.sched_node->esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ if (vport->qos.sched_nodes[i]) {
+ __esw_qos_destroy_node(vport->qos.sched_nodes[i],
+ extack);
+ }
+ }
+
+ kfree(vport->qos.sched_nodes);
+ vport->qos.sched_nodes = NULL;
+}
+
+static int
+esw_qos_create_vport_tc_sched_elements(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *tc_arbiter_node, *vports_tc_node;
+ int err, num_tcs = esw_qos_num_tcs(vport_node->esw->dev);
+ u32 rate_limit_elem_ix;
+
+ vport->qos.sched_nodes = kcalloc(num_tcs,
+ sizeof(struct mlx5_esw_sched_node *),
+ GFP_KERNEL);
+ if (!vport->qos.sched_nodes) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Allocating the vport TC scheduling elements failed.");
+ return -ENOMEM;
+ }
+
+ rate_limit_elem_ix = type == SCHED_NODE_TYPE_RATE_LIMITER ?
+ vport_node->ix : 0;
+ tc_arbiter_node = type == SCHED_NODE_TYPE_RATE_LIMITER ?
+ vport_node->parent : vport_node;
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) {
+ err = esw_qos_create_vport_tc_sched_node(vport,
+ rate_limit_elem_ix,
+ vports_tc_node,
+ extack);
+ if (err)
+ goto err_create_vport_tc;
+ }
+
+ return 0;
+
+err_create_vport_tc:
+ esw_qos_destroy_vport_tc_sched_elements(vport, NULL);
+
+ return err;
+}
+
+static int
+esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ int err, new_level, max_level;
+
+ if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ /* Increase the parent's level by 2 to account for both the
+ * TC arbiter and the vports TC scheduling element.
+ */
+ new_level = vport_node->parent->level + 2;
+ max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
+ log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC arbitration on leafs is not supported beyond max scheduling depth");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER)
+ err = esw_qos_create_rate_limit_element(vport_node, extack);
+ else
+ err = esw_qos_tc_arbiter_scheduling_setup(vport_node, extack);
+ if (err)
+ return err;
+
+ /* Rate limiters impact multiple nodes not directly connected to them
+ * and are not direct members of the QoS hierarchy.
+ * Unlink the vport node from its parent to reflect that.
+ */
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ list_del_init(&vport_node->entry);
+ vport_node->level = 0;
+ }
+
+ err = esw_qos_create_vport_tc_sched_elements(vport, type, extack);
+ if (err)
+ goto err_sched_nodes;
+
+ return 0;
+
+err_sched_nodes:
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ esw_qos_node_destroy_sched_element(vport_node, NULL);
+ list_add_tail(&vport_node->entry,
+ &vport_node->parent->children);
+ vport_node->level = vport_node->parent->level + 1;
+ } else {
+ esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
+ }
+ return err;
+}
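
The depth check above is easiest to read with numbers: if log_esw_max_sched_depth is 2 the hierarchy may be at most 1 << 2 == 4 levels deep, so a vport whose parent sits at level 2 still fits after adding the TC arbiter and the vports-TC layer (2 + 2 == 4), while a parent at level 3 does not (3 + 2 > 4). A small compile-and-run sanity sketch with an illustrative cap value, not part of the patch:

#include <assert.h>

int main(void)
{
	unsigned int log_max_depth = 2;			/* illustrative cap value */
	unsigned int max_level = 1u << log_max_depth;	/* 4 */

	assert(2 + 2 <= max_level);	/* parent at level 2: allowed */
	assert(3 + 2 >  max_level);	/* parent at level 3: rejected */
	return 0;
}
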
+
+static void esw_qos_vport_tc_disable(struct mlx5_vport *vport,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ enum sched_node_type curr_type = vport_node->type;
+
+ esw_qos_destroy_vport_tc_sched_elements(vport, extack);
+
+ if (curr_type == SCHED_NODE_TYPE_RATE_LIMITER)
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+ else
+ esw_qos_tc_arbiter_scheduling_teardown(vport_node, extack);
+}
+
+static int esw_qos_set_vport_tcs_min_rate(struct mlx5_vport *vport,
+ u32 min_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ int err, i, num_tcs = esw_qos_num_tcs(vport_node->esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ err = esw_qos_set_node_min_rate(vport->qos.sched_nodes[i],
+ min_rate, extack);
+ if (err)
+ goto err_out;
+ }
+ vport_node->min_rate = min_rate;
+
+ return 0;
+err_out:
+ for (--i; i >= 0; i--) {
+ esw_qos_set_node_min_rate(vport->qos.sched_nodes[i],
+ vport_node->min_rate, extack);
+ }
+ return err;
+}
+
static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
struct mlx5_esw_sched_node *parent = vport_node->parent;
+ enum sched_node_type curr_type = vport_node->type;
- esw_qos_node_destroy_sched_element(vport_node, extack);
+ if (curr_type == SCHED_NODE_TYPE_VPORT)
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+ else
+ esw_qos_vport_tc_disable(vport, extack);
vport_node->bw_share = 0;
list_del_init(&vport_node->entry);
@@ -569,7 +1098,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a
trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
}
-static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+static int esw_qos_vport_enable(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
int err;
@@ -577,10 +1108,16 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
esw_qos_node_set_parent(vport->qos.sched_node, parent);
- err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack);
+ if (type == SCHED_NODE_TYPE_VPORT) {
+ err = esw_qos_vport_create_sched_element(vport->qos.sched_node,
+ extack);
+ } else {
+ err = esw_qos_vport_tc_enable(vport, type, extack);
+ }
if (err)
return err;
+ vport->qos.sched_node->type = type;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
trace_mlx5_esw_vport_qos_create(vport->dev, vport,
vport->qos.sched_node->max_rate,
@@ -611,9 +1148,8 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
sched_node->min_rate = min_rate;
sched_node->vport = vport;
vport->qos.sched_node = sched_node;
- err = esw_qos_vport_enable(vport, parent, extack);
+ err = esw_qos_vport_enable(vport, type, parent, extack);
if (err) {
- __esw_qos_free_node(sched_node);
esw_qos_put(esw);
vport->qos.sched_node = NULL;
}
@@ -666,6 +1202,8 @@ static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rat
if (!vport_node)
return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate,
extack);
+ else if (vport_node->type == SCHED_NODE_TYPE_RATE_LIMITER)
+ return esw_qos_set_vport_tcs_min_rate(vport, min_rate, extack);
else
return esw_qos_set_node_min_rate(vport_node, min_rate, extack);
}
@@ -698,12 +1236,73 @@ bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *m
return enabled;
}
+static int esw_qos_vport_tc_check_type(enum sched_node_type curr_type,
+ enum sched_node_type new_type,
+ struct netlink_ext_ack *extack)
+{
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR &&
+ new_type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot switch from vport-level TC arbitration to node-level TC arbitration");
+ return -EOPNOTSUPP;
+ }
+
+ if (curr_type == SCHED_NODE_TYPE_RATE_LIMITER &&
+ new_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot switch from node-level TC arbitration to vport-level TC arbitration");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esw_qos_vport_update(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent;
+ enum sched_node_type curr_type = vport->qos.sched_node->type;
+ u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
+ int err;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ parent = parent ?: curr_parent;
+ if (curr_type == type && curr_parent == parent)
+ return 0;
+
+ err = esw_qos_vport_tc_check_type(curr_type, type, extack);
+ if (err)
+ return err;
+
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
+ esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node,
+ curr_tc_bw);
+ }
+
+ esw_qos_vport_disable(vport, extack);
+
+ err = esw_qos_vport_enable(vport, type, parent, extack);
+ if (err) {
+ esw_qos_vport_enable(vport, curr_type, curr_parent, NULL);
+ extack = NULL;
+ }
+
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
+ esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node,
+ curr_tc_bw, extack);
+ }
+
+ return err;
+}
+
static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *curr_parent;
- int err;
+ enum sched_node_type type;
esw_assert_qos_lock_held(esw);
curr_parent = vport->qos.sched_node->parent;
@@ -711,15 +1310,206 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw
if (curr_parent == parent)
return 0;
- esw_qos_vport_disable(vport, extack);
+ /* Set vport QoS type based on parent node type if different from
+ * default QoS; otherwise, use the vport's current QoS type.
+ */
+ if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ type = SCHED_NODE_TYPE_RATE_LIMITER;
+ else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ type = SCHED_NODE_TYPE_VPORT;
+ else
+ type = vport->qos.sched_node->type;
+
+ return esw_qos_vport_update(vport, type, parent, extack);
+}
- err = esw_qos_vport_enable(vport, parent, extack);
+static void
+esw_qos_switch_vport_tcs_to_vport(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vports_tc_node, *vport_tc_node, *tmp;
+
+ vports_tc_node = list_first_entry(&tc_arbiter_node->children,
+ struct mlx5_esw_sched_node,
+ entry);
+
+ list_for_each_entry_safe(vport_tc_node, tmp, &vports_tc_node->children,
+ entry)
+ esw_qos_vport_update_parent(vport_tc_node->vport, node, extack);
+}
+
+static int esw_qos_switch_tc_arbiter_node_to_vports(
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 parent_tsar_ix = node->parent ?
+ node->parent->ix : node->esw->qos.root_tsar_ix;
+ int err;
+
+ err = esw_qos_create_node_sched_elem(node->esw->dev, parent_tsar_ix,
+ node->max_rate, node->bw_share,
+ &node->ix);
if (err) {
- if (esw_qos_vport_enable(vport, curr_parent, NULL))
- esw_warn(parent->esw->dev, "vport restore QoS failed (vport=%d)\n",
- vport->vport);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create scheduling element for vports node when disabling vports TC QoS");
+ return err;
+ }
+
+ node->type = SCHED_NODE_TYPE_VPORTS_TSAR;
+
+ /* Disable TC QoS for vports in the arbiter node. */
+ esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, extack);
+
+ return 0;
+}
+
+static int esw_qos_switch_vports_node_to_tc_arbiter(
+ struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node, *tmp;
+ struct mlx5_vport *vport;
+ int err;
+
+ /* Enable TC QoS for each vport in the node. */
+ list_for_each_entry_safe(vport_node, tmp, &node->children, entry) {
+ vport = vport_node->vport;
+ err = esw_qos_vport_update_parent(vport, tc_arbiter_node,
+ extack);
+ if (err)
+ goto err_out;
+ }
+
+ /* Destroy the current vports node TSAR. */
+ err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ if (err)
+ goto err_out;
+
+ return 0;
+err_out:
+ /* Restore vports back into the node if an error occurs. */
+ esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, NULL);
+
+ return err;
+}
+
+static struct mlx5_esw_sched_node *
+esw_qos_move_node(struct mlx5_esw_sched_node *curr_node)
+{
+ struct mlx5_esw_sched_node *new_node;
+
+ new_node = __esw_qos_alloc_node(curr_node->esw, curr_node->ix,
+ curr_node->type, NULL);
+ if (!new_node)
+ return ERR_PTR(-ENOMEM);
+
+ esw_qos_nodes_set_parent(&curr_node->children, new_node);
+ return new_node;
+}
+
+static int esw_qos_node_disable_tc_arbitration(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_node;
+ int err;
+
+ if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return 0;
+
+ /* Allocate a new rate node to hold the current state, which will allow
+ * for restoring the vports back to this node after disabling TC
+ * arbitration.
+ */
+ curr_node = esw_qos_move_node(node);
+ if (IS_ERR(curr_node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting up vports node");
+ return PTR_ERR(curr_node);
+ }
+
+ /* Disable TC QoS for all vports, and assign them back to the node. */
+ err = esw_qos_switch_tc_arbiter_node_to_vports(curr_node, node, extack);
+ if (err)
+ goto err_out;
+
+ /* Clean up the TC arbiter node after disabling TC QoS for vports. */
+ esw_qos_tc_arbiter_scheduling_teardown(curr_node, extack);
+ goto out;
+err_out:
+ esw_qos_nodes_set_parent(&curr_node->children, node);
+out:
+ __esw_qos_free_node(curr_node);
+ return err;
+}
+
+static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_node, *child;
+ int err, new_level, max_level;
+
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return 0;
+
+ /* Increase the hierarchy level by one to account for the additional
+ * vports TC scheduling node, and verify that the new level does not
+ * exceed the maximum allowed depth.
+ */
+ new_level = node->level + 1;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC arbitration on nodes is not supported beyond max scheduling depth");
+ return -EOPNOTSUPP;
+ }
+
+ /* Ensure the node does not contain non-leaf children before assigning
+ * TC bandwidth.
+ */
+ if (!list_empty(&node->children)) {
+ list_for_each_entry(child, &node->children, entry) {
+ if (!child->vport) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure TC bandwidth on a node with non-leaf children");
+ return -EOPNOTSUPP;
+ }
+ }
}
+ /* Allocate a new node that will store the information of the current
+ * node. This will be used later to restore the node if necessary.
+ */
+ curr_node = esw_qos_move_node(node);
+ if (IS_ERR(curr_node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting up node TC QoS");
+ return PTR_ERR(curr_node);
+ }
+
+ /* Initialize the TC arbiter node for QoS management.
+ * This step prepares the node for handling Traffic Class arbitration.
+ */
+ err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
+ if (err)
+ goto err_setup;
+
+ /* Enable TC QoS for each vport within the current node. */
+ err = esw_qos_switch_vports_node_to_tc_arbiter(curr_node, node, extack);
+ if (err)
+ goto err_switch_vports;
+ goto out;
+
+err_switch_vports:
+ esw_qos_tc_arbiter_scheduling_teardown(node, NULL);
+ node->ix = curr_node->ix;
+ node->type = curr_node->type;
+err_setup:
+ esw_qos_nodes_set_parent(&curr_node->children, node);
+out:
+ __esw_qos_free_node(curr_node);
return err;
}
@@ -848,6 +1638,41 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return 0;
}
+static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
+ u32 *tc_bw)
+{
+ int i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = num_tcs; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (tc_bw[i])
+ return false;
+ }
+
+ return true;
+}
+
+static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
+ u32 *tc_bw)
+{
+ struct mlx5_eswitch *esw = vport->qos.sched_node ?
+ vport->qos.sched_node->parent->esw :
+ vport->dev->priv.eswitch;
+
+ return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
+}
+
+static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
+{
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (tc_bw[i])
+ return false;
+ }
+
+ return true;
+}
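
A standalone sketch of the two tc_bw checks above, assuming DEVLINK_RATE_TCS_MAX is 8 and a device exposing 4 traffic classes (both values are illustrative; not part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define TCS_MAX 8	/* stand-in for DEVLINK_RATE_TCS_MAX */

static bool tc_bw_disabled(const unsigned int *tc_bw)
{
	for (int i = 0; i < TCS_MAX; i++)
		if (tc_bw[i])
			return false;	/* any non-zero share keeps TC QoS enabled */
	return true;
}

static bool tc_bw_supported(const unsigned int *tc_bw, int num_tcs)
{
	/* a share requested for a TC the device doesn't have is rejected */
	for (int i = num_tcs; i < TCS_MAX; i++)
		if (tc_bw[i])
			return false;
	return true;
}

int main(void)
{
	unsigned int all_zero[TCS_MAX] = { 0 };
	unsigned int valid[TCS_MAX] = { 20, 30, 50, 0, 0, 0, 0, 0 };
	unsigned int out_of_range[TCS_MAX] = { 20, 30, 40, 0, 10, 0, 0, 0 };

	printf("%d %d %d\n", tc_bw_disabled(all_zero),	/* 1 */
	       tc_bw_supported(valid, 4),		/* 1 */
	       tc_bw_supported(out_of_range, 4));	/* 0 */
	return 0;
}
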
+
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
{
if (esw->qos.domain)
@@ -906,6 +1731,90 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
return err;
}
+int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node;
+ struct mlx5_vport *vport = priv;
+ struct mlx5_eswitch *esw;
+ bool disable;
+ int err = 0;
+
+ esw = vport->dev->priv.eswitch;
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ disable = esw_qos_tc_bw_disabled(tc_bw);
+ esw_qos_lock(esw);
+
+ if (!esw_qos_vport_validate_unsupported_tc_bw(vport, tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch traffic classes number is not supported");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ vport_node = vport->qos.sched_node;
+ if (disable && !vport_node)
+ goto unlock;
+
+ if (disable) {
+ if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
+ NULL, extack);
+ goto unlock;
+ }
+
+ if (!vport_node) {
+ err = mlx5_esw_qos_vport_enable(vport,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ NULL, 0, 0, extack);
+ vport_node = vport->qos.sched_node;
+ } else {
+ err = esw_qos_vport_update(vport,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ NULL, extack);
+ }
+ if (!err)
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
+unlock:
+ esw_qos_unlock(esw);
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
+ bool disable;
+ int err;
+
+ if (!esw_qos_validate_unsupported_tc_bw(esw, tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch traffic classes number is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ disable = esw_qos_tc_bw_disabled(tc_bw);
+ esw_qos_lock(esw);
+ if (disable) {
+ err = esw_qos_node_disable_tc_arbitration(node, extack);
+ goto unlock;
+ }
+
+ err = esw_qos_node_enable_tc_arbitration(node, extack);
+ if (!err)
+ esw_qos_set_tc_arbiter_bw_shares(node, tc_bw, extack);
+unlock:
+ esw_qos_unlock(esw);
+ return err;
+}
+
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
@@ -996,10 +1905,16 @@ int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_s
}
esw_qos_lock(esw);
- if (!vport->qos.sched_node && parent)
- err = mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, parent, 0, 0, extack);
- else if (vport->qos.sched_node)
+ if (!vport->qos.sched_node && parent) {
+ enum sched_node_type type;
+
+ type = parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR ?
+ SCHED_NODE_TYPE_RATE_LIMITER : SCHED_NODE_TYPE_VPORT;
+ err = mlx5_esw_qos_vport_enable(vport, type, parent, 0, 0,
+ extack);
+ } else if (vport->qos.sched_node) {
err = esw_qos_vport_update_parent(vport, parent, extack);
+ }
esw_qos_unlock(esw);
return err;
}
@@ -1019,6 +1934,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
}
+static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
+{
+ if (list_empty(&node->children))
+ return true;
+
+ if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return false;
+
+ node = list_first_entry(&node->children, struct mlx5_esw_sched_node,
+ entry);
+
+ return esw_qos_is_node_empty(node);
+}
+
static int
mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
struct mlx5_esw_sched_node *parent,
@@ -1032,13 +1961,26 @@ mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
return -EOPNOTSUPP;
}
- if (!list_empty(&node->children)) {
+ if (!esw_qos_is_node_empty(node)) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot reassign a node that contains rate objects");
return -EOPNOTSUPP;
}
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot attach a node to a parent with TC bandwidth configured");
+ return -EOPNOTSUPP;
+ }
+
new_level = parent ? parent->level + 1 : 2;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ /* Increase by one to account for the vports TC scheduling
+ * element.
+ */
+ new_level += 1;
+ }
+
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1049,6 +1991,32 @@ mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
return 0;
}
+static int
+esw_qos_tc_arbiter_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ esw_qos_tc_arbiter_get_bw_shares(node, curr_tc_bw);
+ esw_qos_tc_arbiter_scheduling_teardown(node, extack);
+ esw_qos_node_set_parent(node, parent);
+ err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
+ if (err) {
+ esw_qos_node_set_parent(node, curr_parent);
+ if (esw_qos_tc_arbiter_scheduling_setup(node, extack)) {
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+ return err;
+ }
+ }
+ esw_qos_set_tc_arbiter_bw_shares(node, curr_tc_bw, extack);
+
+ return err;
+}
+
static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
@@ -1076,6 +2044,7 @@ static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
return err;
}
esw_qos_node_set_parent(node, parent);
+ node->bw_share = 0;
return 0;
}
@@ -1094,7 +2063,13 @@ static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
esw_qos_lock(esw);
curr_parent = node->parent;
- err = esw_qos_vports_node_update_parent(node, parent, extack);
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ err = esw_qos_tc_arbiter_node_update_parent(node, parent,
+ extack);
+ } else {
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ }
+
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index ed40ec8f027e..0a50982b0e27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -21,6 +21,14 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
u64 tx_share, struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
u64 tx_max, struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 8573d36785f4..b0b8ef3ec3c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -212,10 +212,20 @@ struct mlx5_vport {
struct mlx5_vport_info info;
- /* Protected with the E-Switch qos domain lock. */
+ /* Protected with the E-Switch qos domain lock. The Vport QoS can
+ * either be disabled (sched_node is NULL) or be in one of three states:
+ * 1. Regular QoS (sched_node is a vport node).
+ * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
+ * 3. TC QoS enabled on the vport's parent node
+ * (sched_node is a rate limit node).
+ * When TC is enabled in either mode, the vport owns vport TC scheduling
+ * nodes.
+ */
struct {
- /* Vport scheduling element node. */
+ /* Vport scheduling node. */
struct mlx5_esw_sched_node *sched_node;
+ /* Array of vport traffic class scheduling nodes. */
+ struct mlx5_esw_sched_node **sched_nodes;
} qos;
u16 vport;
@@ -817,7 +827,7 @@ void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
/**
- * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
+ * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
*
* @new_mode: New mode of eswitch.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0e3a977d5332..bee906661282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1182,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- /* total vports is the same for both e-switches */
- int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
+ struct mlx5_vport *peer_vport;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
int err, pfindex;
unsigned long i;
void *misc;
- if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ if (!MLX5_VPORT_MANAGER(peer_dev) &&
+ !mlx5_core_is_ecpf_esw_manager(peer_dev))
return 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1203,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -1213,10 +1213,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, MLX5_VPORT_PF);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+ MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1224,11 +1224,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1236,13 +1236,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
esw_set_peer_miss_rule_source_port(esw,
- peer_dev->priv.eswitch,
- spec, vport->vport);
+ peer_esw,
+ spec, peer_vport->vport);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1250,22 +1251,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (i >= mlx5_core_max_ec_vfs(peer_dev))
- break;
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ec_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
}
@@ -1282,25 +1283,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_ec_vf_flow_err:
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_vf_flow_err:
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
@@ -1313,37 +1316,34 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
- struct mlx5_vport *vport;
+ struct mlx5_vport *peer_vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
if (!flows)
return;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- /* The flow for a particular vport could be NULL if the other ECPF
- * has fewer or no VFs enabled
- */
- if (!flows[vport->index])
- continue;
- mlx5_del_flow_rules(flows[vport->index]);
- }
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
kvfree(flows);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a8046200d376..d87392360dbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -113,13 +113,16 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
* {IPsec RoCE MPV,Alias table},IPsec RoCE policy
*/
-#define KERNEL_NIC_PRIO_NUM_LEVELS 11
+#define KERNEL_NIC_PRIO_NUM_LEVELS 10
#define KERNEL_NIC_NUM_PRIOS 1
-/* One more level for tc */
-#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+/* One more level for tc, and one more for promisc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
+
+#define KERNEL_NIC_PROMISC_NUM_PRIOS 1
+#define KERNEL_NIC_PROMISC_NUM_LEVELS 1
#define KERNEL_NIC_TC_NUM_PRIOS 1
#define KERNEL_NIC_TC_NUM_LEVELS 3
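
The level accounting here is easiest to see as arithmetic: the old layout was 11 kernel-NIC levels plus 1 for tc (12 total), the new one is 10 plus 1 for tc plus 1 for promisc, so the overall budget is unchanged. A compile-time sketch of that equality (the names are local stand-ins, not the kernel macros):

#define OLD_KERNEL_NIC_PRIO_NUM_LEVELS	11
#define OLD_KERNEL_MIN_LEVEL		(OLD_KERNEL_NIC_PRIO_NUM_LEVELS + 1)	/* 12 */
#define NEW_KERNEL_NIC_PRIO_NUM_LEVELS	10
#define NEW_KERNEL_MIN_LEVEL		(NEW_KERNEL_NIC_PRIO_NUM_LEVELS + 2)	/* 12 */

_Static_assert(OLD_KERNEL_MIN_LEVEL == NEW_KERNEL_MIN_LEVEL,
	       "promisc split keeps the level budget unchanged");
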
@@ -187,6 +190,8 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS,
+ KERNEL_NIC_PROMISC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
@@ -3245,34 +3250,62 @@ static int
init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
int vport_idx)
{
+ struct mlx5_flow_root_namespace *root_ns;
struct fs_prio *prio;
+ int ret;
+ int i;
steering->rdma_transport_rx_root_ns[vport_idx] =
create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
if (!steering->rdma_transport_rx_root_ns[vport_idx])
return -ENOMEM;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns,
- MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
- return PTR_ERR_OR_ZERO(prio);
+ root_ns = steering->rdma_transport_rx_root_ns[vport_idx];
+
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ prio = fs_create_prio(&root_ns->ns, i, 1);
+ if (IS_ERR(prio)) {
+ ret = PTR_ERR(prio);
+ goto err;
+ }
+ }
+ set_prio_attrs(root_ns);
+ return 0;
+
+err:
+ cleanup_root_ns(root_ns);
+ return ret;
}
static int
init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
int vport_idx)
{
+ struct mlx5_flow_root_namespace *root_ns;
struct fs_prio *prio;
+ int ret;
+ int i;
steering->rdma_transport_tx_root_ns[vport_idx] =
create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
if (!steering->rdma_transport_tx_root_ns[vport_idx])
return -ENOMEM;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns,
- MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
- return PTR_ERR_OR_ZERO(prio);
+ root_ns = steering->rdma_transport_tx_root_ns[vport_idx];
+
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ prio = fs_create_prio(&root_ns->ns, i, 1);
+ if (IS_ERR(prio)) {
+ ret = PTR_ERR(prio);
+ goto err;
+ }
+ }
+ set_prio_attrs(root_ns);
+ return 0;
+
+err:
+ cleanup_root_ns(root_ns);
+ return ret;
}
static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
@@ -3919,6 +3952,8 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (mlx5_fs_hws_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 9772327d5124..4b3430ac3905 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -238,6 +238,23 @@ static u32 mlx5i_flow_type_mask(u32 flow_type)
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}
+static int mlx5i_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
+static int mlx5i_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -283,6 +300,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+ .get_rxfh_fields = mlx5i_get_rxfh_fields,
+ .set_rxfh_fields = mlx5i_set_rxfh_fields,
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
.get_link_ksettings = mlx5i_get_link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 2691d88cdee1..82d3c2568244 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -47,29 +47,40 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
- struct irq_affinity_desc auto_desc = {};
+ struct irq_affinity_desc *auto_desc;
struct mlx5_irq *irq;
u32 irq_index;
int err;
+ auto_desc = kvzalloc(sizeof(*auto_desc), GFP_KERNEL);
+ if (!auto_desc)
+ return ERR_PTR(-ENOMEM);
+
err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
- if (err)
+ if (err) {
+ kvfree(auto_desc);
return ERR_PTR(err);
+ }
+
if (pool->irqs_per_cpu) {
if (cpumask_weight(&af_desc->mask) > 1)
/* if req_mask contains more than one CPU, set the least loaded CPU
* of req_mask
*/
cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
- &auto_desc.mask);
+ &auto_desc->mask);
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
+
irq = mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+ cpumask_empty(&auto_desc->mask) ? af_desc : auto_desc,
NULL);
if (IS_ERR(irq))
xa_erase(&pool->irqs, irq_index);
+
+ kvfree(auto_desc);
+
return irq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index cec18efadc73..214d732d18e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -343,7 +343,7 @@ static u64 mlx5_read_time(struct mlx5_core_dev *dev,
(u64)timer_l | (u64)timer_h1 << 32;
}
-static u64 read_internal_timer(const struct cyclecounter *cc)
+static u64 read_internal_timer(struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 7c5516b0a844..8115071c34a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -30,7 +30,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
- return ERR_PTR(-ENOMEM);
+ return NULL;
spin_lock_init(&dm->lock);
@@ -96,7 +96,7 @@ err_modify_hdr:
err_steering:
kfree(dm);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 41e8660c819c..e7bcd0f0a709 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1102,9 +1102,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->dm = mlx5_dm_create(dev);
- if (IS_ERR(dev->dm))
- mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
-
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
dev->rsc_dump = mlx5_rsc_dump_create(dev);
@@ -2257,6 +2254,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
+ { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 2e02bdea8361..c518380c4ce7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -495,4 +495,17 @@ static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
return 1 << MLX5_CAP_GEN(dev, log_max_eq);
}
+
+static inline bool mlx5_pcie_cong_event_supported(struct mlx5_core_dev *dev)
+{
+ u64 features = MLX5_CAP_GEN_2_64(dev, general_obj_types_127_64);
+
+ if (!(features & MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT))
+ return false;
+
+ if (dev->sd)
+ return false;
+
+ return true;
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 40024cfa3099..692ef9c2f729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -470,26 +470,32 @@ void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
- struct irq_affinity_desc af_desc;
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- af_desc.is_managed = false;
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
+
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ af_desc->is_managed = false;
if (!mlx5_irq_pool_is_sf_pool(pool)) {
/* In case we are allocating a control IRQ from a PCI device's pool.
* This can also happen for an SF if the SF pool is empty.
*/
if (!pool->xa_num_irqs.max) {
- cpumask_clear(&af_desc.mask);
+ cpumask_clear(&af_desc->mask);
/* In case we only have a single IRQ for PF/VF */
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
}
/* Allocate the IRQ in index 0. The vector was already allocated */
- irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
+ irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
}
+ kvfree(af_desc);
+
return irq;
}
@@ -548,16 +554,26 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
- struct irq_affinity_desc af_desc;
int offset = MLX5_IRQ_VEC_COMP_BASE;
+ struct irq_affinity_desc *af_desc;
+ struct mlx5_irq *irq;
+
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
if (!pool->xa_num_irqs.max)
offset = 0;
- af_desc.is_managed = false;
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpu, &af_desc.mask);
- return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
+ af_desc->is_managed = false;
+ cpumask_clear(&af_desc->mask);
+ cpumask_set_cpu(cpu, &af_desc->mask);
+
+ irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
+
+ kvfree(af_desc);
+
+ return irq;
}
static struct mlx5_irq_pool *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index 447ea3f8722c..396804369b00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -1358,12 +1358,9 @@ free_action:
}
struct mlx5hws_action *
-mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
- size_t num_dest,
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level,
- u32 flow_source,
- u32 flags)
+ bool ignore_flow_level, u32 flags)
{
struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 9e057f808ea5..92de4b761a83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -48,7 +48,7 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 priority,
- u8 size_log,
+ u8 size_log_rx, u8 size_log_tx,
struct mlx5hws_matcher_attr *attr)
{
struct mlx5hws_bwc_matcher *first_matcher =
@@ -62,7 +62,8 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
- attr->rule.num_log = size_log;
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = size_log_rx;
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
@@ -70,6 +71,130 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
+static int
+hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ bool move_error = false, poll_error = false, drain_error = false;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5hws_send_engine *queue;
+ struct list_head *rules_list;
+ u32 pending_rules;
+ int i, ret = 0;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ continue;
+
+ pending_rules = 0;
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ rules_list = &bwc_matcher->rules[i];
+
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ bwc_rule->rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
+
+ pending_rules++;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ false);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
+ }
+ }
+
+ if (pending_rules) {
+ queue = &ctx->send_queue[rule_attr.queue_id];
+ mlx5hws_send_engine_flush_queue(queue);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ true);
+ if (unlikely(ret && !drain_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = true;
+ }
+ }
+ }
+
+ if (move_error || poll_error || drain_error)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ if (!bwc_matcher->complex)
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+
+ return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(bwc_matcher,
+ bwc_matcher->priority,
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log,
+ &matcher_attr);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret)
+ mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+ /* Error during rehash can't be rolled back.
+ * The best option here is to allow the rehash to complete and remove
+ * the old matcher - can't leave the matcher in the 'in_resize' state.
+ */
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return ret;
+}
+
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
u32 priority,
@@ -92,11 +217,11 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
hws_bwc_matcher_init_attr(bwc_matcher,
priority,
- MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG,
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log,
&attr);
bwc_matcher->priority = priority;
- bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
@@ -148,6 +273,20 @@ err:
return -EINVAL;
}
+static void
+hws_bwc_matcher_init_size_rxtx(struct mlx5hws_bwc_matcher_size *size)
+{
+ size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ atomic_set(&size->num_of_rules, 0);
+ atomic_set(&size->rehash_required, false);
+}
+
+static void hws_bwc_matcher_init_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ hws_bwc_matcher_init_size_rxtx(&bwc_matcher->rx_size);
+ hws_bwc_matcher_init_size_rxtx(&bwc_matcher->tx_size);
+}
+
struct mlx5hws_bwc_matcher *
mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
u32 priority,
@@ -168,8 +307,7 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
if (!bwc_matcher)
return NULL;
- atomic_set(&bwc_matcher->num_of_rules, 0);
- atomic_set(&bwc_matcher->rehash_required, false);
+ hws_bwc_matcher_init_size(bwc_matcher);
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
@@ -219,12 +357,13 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+ u32 rx_rules = atomic_read(&bwc_matcher->rx_size.num_of_rules);
+ u32 tx_rules = atomic_read(&bwc_matcher->tx_size.num_of_rules);
- if (num_of_rules)
+ if (rx_rules || tx_rules)
mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
- "BWC matcher destroy: matcher still has %d rules\n",
- num_of_rules);
+ "BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
+ rx_rules, tx_rules);
if (bwc_matcher->complex)
mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
@@ -384,6 +523,80 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
return 0;
}
+static void hws_bwc_rule_cnt_dec(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ if (!bwc_rule->skip_rx)
+ atomic_dec(&bwc_matcher->rx_size.num_of_rules);
+ if (!bwc_rule->skip_tx)
+ atomic_dec(&bwc_matcher->tx_size.num_of_rules);
+}
+
+static int
+hws_bwc_matcher_rehash_shrink(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_matcher_size *rx_size = &bwc_matcher->rx_size;
+ struct mlx5hws_bwc_matcher_size *tx_size = &bwc_matcher->tx_size;
+
+ /* It is possible that another thread has added a rule.
+ * Need to check again if we really need rehash/shrink.
+ */
+ if (atomic_read(&rx_size->num_of_rules) ||
+ atomic_read(&tx_size->num_of_rules))
+ return 0;
+
+ /* Nothing to shrink if the matcher RX/TX sizes are already at the initial size. */
+ if (rx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG &&
+ tx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG)
+ return 0;
+
+ /* Now we've done all the checking - do the shrinking:
+ * - reset match RTC size to the initial size
+ * - create new matcher
+ * - move the rules, which will not do anything as the matcher is empty
+ * - destroy the old matcher
+ */
+
+ rx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ tx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int hws_bwc_rule_cnt_dec_with_shrink(struct mlx5hws_bwc_rule *bwc_rule,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ hws_bwc_rule_cnt_dec(bwc_rule);
+
+ if (atomic_read(&bwc_matcher->rx_size.num_of_rules) ||
+ atomic_read(&bwc_matcher->tx_size.num_of_rules))
+ return 0;
+
+ /* Matcher has no more rules - shrink it to save ICM. */
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_shrink(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ mutex_lock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx,
+ "BWC rule deletion: shrinking empty matcher failed (%d)\n",
+ ret);
+
+ return ret;
+}
+
int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
@@ -400,8 +613,8 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
mutex_lock(queue_lock);
ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
- atomic_dec(&bwc_matcher->num_of_rules);
hws_bwc_rule_list_remove(bwc_rule);
+ hws_bwc_rule_cnt_dec_with_shrink(bwc_rule, idx);
mutex_unlock(queue_lock);
@@ -487,25 +700,27 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
}
static bool
-hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size)
{
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
/* check the match RTC size */
- return (bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ return (size->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
(caps->ste_alloc_log_max - 1);
}
static bool
hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size,
u32 num_of_rules)
{
- if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size)))
return false;
if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
- (1UL << bwc_matcher->size_log)))
+ (1UL << size->size_log)))
return true;
return false;
@@ -562,20 +777,21 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
}
static int
-hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size)
{
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_cmd_query_caps *caps = ctx->caps;
- if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size))) {
mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
caps->rtc_log_depth_max);
return -ENOMEM;
}
- bwc_matcher->size_log =
- min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
- caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+ size->size_log = min(size->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max -
+ MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
return 0;
}
@@ -608,146 +824,42 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
return -1;
}
-static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- bool move_error = false, poll_error = false, drain_error = false;
- struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
- u16 bwc_queues = mlx5hws_bwc_queues(ctx);
- struct mlx5hws_rule_attr rule_attr;
- struct mlx5hws_bwc_rule *bwc_rule;
- struct mlx5hws_send_engine *queue;
- struct list_head *rules_list;
- u32 pending_rules;
- int i, ret = 0;
-
- mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
-
- for (i = 0; i < bwc_queues; i++) {
- if (list_empty(&bwc_matcher->rules[i]))
- continue;
-
- pending_rules = 0;
- rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
- rules_list = &bwc_matcher->rules[i];
-
- list_for_each_entry(bwc_rule, rules_list, list_node) {
- ret = mlx5hws_matcher_resize_rule_move(matcher,
- bwc_rule->rule,
- &rule_attr);
- if (unlikely(ret && !move_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
- ret);
- move_error = true;
- }
-
- pending_rules++;
- ret = mlx5hws_bwc_queue_poll(ctx,
- rule_attr.queue_id,
- &pending_rules,
- false);
- if (unlikely(ret && !poll_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
- ret);
- poll_error = true;
- }
- }
-
- if (pending_rules) {
- queue = &ctx->send_queue[rule_attr.queue_id];
- mlx5hws_send_engine_flush_queue(queue);
- ret = mlx5hws_bwc_queue_poll(ctx,
- rule_attr.queue_id,
- &pending_rules,
- true);
- if (unlikely(ret && !drain_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
- ret);
- drain_error = true;
- }
- }
- }
-
- if (move_error || poll_error || drain_error)
- ret = -EINVAL;
-
- return ret;
-}
-
-static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- if (!bwc_matcher->complex)
- return hws_bwc_matcher_move_all_simple(bwc_matcher);
-
- return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
-}
-
-static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_matcher_attr matcher_attr = {0};
- struct mlx5hws_matcher *old_matcher;
- struct mlx5hws_matcher *new_matcher;
- int ret;
-
- hws_bwc_matcher_init_attr(bwc_matcher,
- bwc_matcher->priority,
- bwc_matcher->size_log,
- &matcher_attr);
-
- old_matcher = bwc_matcher->matcher;
- new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
- &bwc_matcher->mt, 1,
- bwc_matcher->at,
- bwc_matcher->num_of_at,
- &matcher_attr);
- if (!new_matcher) {
- mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
- return -ENOMEM;
- }
-
- ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
- if (ret) {
- mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
- return ret;
- }
-
- ret = hws_bwc_matcher_move_all(bwc_matcher);
- if (ret)
- mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
-
- /* Error during rehash can't be rolled back.
- * The best option here is to allow the rehash to complete and remove
- * the old matcher - can't leave the matcher in the 'in_resize' state.
- */
-
- bwc_matcher->matcher = new_matcher;
- mlx5hws_matcher_destroy(old_matcher);
-
- return ret;
-}
-
static int
hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
+ bool need_rx_rehash, need_tx_rehash;
int ret;
- /* If the current matcher size is already at its max size, we can't
- * do the rehash. Skip it and try adding the rule again - perhaps
- * there was some change.
+ need_rx_rehash = atomic_read(&bwc_matcher->rx_size.rehash_required);
+ need_tx_rehash = atomic_read(&bwc_matcher->tx_size.rehash_required);
+
+ /* It is possible that another rule has already performed rehash.
+ * Need to check again if we really need rehash.
*/
- if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+ if (!need_rx_rehash && !need_tx_rehash)
return 0;
- /* It is possible that other rule has already performed rehash.
- * Need to check again if we really need rehash.
+ /* If the current matcher RX/TX size is already at its max size,
+ * it can't be rehashed.
*/
- if (!atomic_read(&bwc_matcher->rehash_required) &&
- !hws_bwc_matcher_rehash_size_needed(bwc_matcher,
- atomic_read(&bwc_matcher->num_of_rules)))
+ if (need_rx_rehash &&
+ hws_bwc_matcher_size_maxed_out(bwc_matcher,
+ &bwc_matcher->rx_size)) {
+ atomic_set(&bwc_matcher->rx_size.rehash_required, false);
+ need_rx_rehash = false;
+ }
+ if (need_tx_rehash &&
+ hws_bwc_matcher_size_maxed_out(bwc_matcher,
+ &bwc_matcher->tx_size)) {
+ atomic_set(&bwc_matcher->tx_size.rehash_required, false);
+ need_tx_rehash = false;
+ }
+
+ /* If both RX and TX rehash flags are now off, it means that whatever
+ * we wanted to rehash is now at its max size - no rehash can be done.
+ * Return and try adding the rule again - perhaps there was some change.
+ */
+ if (!need_rx_rehash && !need_tx_rehash)
return 0;
/* Now we're done all the checking - do the rehash:
@@ -756,12 +868,22 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* - move all the rules to the new matcher
* - destroy the old matcher
*/
+ atomic_set(&bwc_matcher->rx_size.rehash_required, false);
+ atomic_set(&bwc_matcher->tx_size.rehash_required, false);
- atomic_set(&bwc_matcher->rehash_required, false);
+ if (need_rx_rehash) {
+ ret = hws_bwc_matcher_extend_size(bwc_matcher,
+ &bwc_matcher->rx_size);
+ if (ret)
+ return ret;
+ }
- ret = hws_bwc_matcher_extend_size(bwc_matcher);
- if (ret)
- return ret;
+ if (need_tx_rehash) {
+ ret = hws_bwc_matcher_extend_size(bwc_matcher,
+ &bwc_matcher->tx_size);
+ if (ret)
+ return ret;
+ }
return hws_bwc_matcher_move(bwc_matcher);
}
@@ -813,6 +935,62 @@ out:
return at_idx;
}
+static void hws_bwc_rule_cnt_inc_rxtx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher_size *size)
+{
+ u32 num_of_rules = atomic_inc_return(&size->num_of_rules);
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_rule->bwc_matcher,
+ size, num_of_rules)))
+ atomic_set(&size->rehash_required, true);
+}
+
+static void hws_bwc_rule_cnt_inc(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ if (!bwc_rule->skip_rx)
+ hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->rx_size);
+ if (!bwc_rule->skip_tx)
+ hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->tx_size);
+}
+
+static int hws_bwc_rule_cnt_inc_with_rehash(struct mlx5hws_bwc_rule *bwc_rule,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ hws_bwc_rule_cnt_inc(bwc_rule);
+
+ if (!atomic_read(&bwc_matcher->rx_size.rehash_required) &&
+ !atomic_read(&bwc_matcher->tx_size.rehash_required))
+ return 0;
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ mutex_lock(queue_lock);
+
+ if (likely(!ret))
+ return 0;
+
+ /* Failed to rehash. Print a diagnostic and roll back the counters. */
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash to sizes [%d, %d] failed (%d)\n",
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log, ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+
+ return ret;
+}
+
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
u32 *match_param,
struct mlx5hws_rule_action rule_actions[],
@@ -823,7 +1001,6 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_rule_attr rule_attr;
struct mutex *queue_lock; /* Protect the queue */
- u32 num_of_rules;
int ret = 0;
int at_idx;
@@ -841,26 +1018,10 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
- /* check if number of rules require rehash */
- num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules);
-
- if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ ret = hws_bwc_rule_cnt_inc_with_rehash(bwc_rule, bwc_queue_idx);
+ if (unlikely(ret)) {
mutex_unlock(queue_lock);
-
- hws_bwc_lock_all_queues(ctx);
- ret = hws_bwc_matcher_rehash_size(bwc_matcher);
- hws_bwc_unlock_all_queues(ctx);
-
- if (ret) {
- mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
- bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
- bwc_matcher->size_log,
- ret);
- atomic_dec(&bwc_matcher->num_of_rules);
- return ret;
- }
-
- mutex_lock(queue_lock);
+ return ret;
}
ret = hws_bwc_rule_create_sync(bwc_rule,
@@ -876,12 +1037,13 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
/* At this point the rule wasn't added.
* It could be because there was collision, or some other problem.
- * If we don't dive deeper than API, the only thing we know is that
- * the status of completion is RTE_FLOW_OP_ERROR.
* Try rehash by size and insert rule again - last chance.
*/
+ if (!bwc_rule->skip_rx)
+ atomic_set(&bwc_matcher->rx_size.rehash_required, true);
+ if (!bwc_rule->skip_tx)
+ atomic_set(&bwc_matcher->tx_size.rehash_required, true);
- atomic_set(&bwc_matcher->rehash_required, true);
mutex_unlock(queue_lock);
hws_bwc_lock_all_queues(ctx);
@@ -890,7 +1052,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (ret) {
mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
- atomic_dec(&bwc_matcher->num_of_rules);
+ hws_bwc_rule_cnt_dec(bwc_rule);
return ret;
}
@@ -906,7 +1068,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret)) {
mutex_unlock(queue_lock);
mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
- atomic_dec(&bwc_matcher->num_of_rules);
+ hws_bwc_rule_cnt_dec(bwc_rule);
return ret;
}
@@ -936,6 +1098,10 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
if (unlikely(!bwc_rule))
return NULL;
+ bwc_rule->flow_source = flow_source;
+ mlx5hws_rule_skip(bwc_matcher->matcher, flow_source,
+ &bwc_rule->skip_rx, &bwc_rule->skip_tx);
+
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
if (bwc_matcher->complex)
@@ -971,7 +1137,8 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
idx = bwc_rule->bwc_queue_idx;
- mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, bwc_rule->flow_source,
+ &rule_attr);
queue_lock = hws_bwc_get_queue_lock(ctx, idx);
mutex_lock(queue_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index d21fc247a510..af391d70c14f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -19,6 +19,13 @@
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
struct mlx5hws_bwc_matcher_complex_data;
+
+struct mlx5hws_bwc_matcher_size {
+ u8 size_log;
+ atomic_t num_of_rules;
+ atomic_t rehash_required;
+};
+
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
@@ -27,10 +34,9 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
u8 size_of_at_array;
- u8 size_log;
u32 priority;
- atomic_t num_of_rules;
- atomic_t rehash_required;
+ struct mlx5hws_bwc_matcher_size rx_size;
+ struct mlx5hws_bwc_matcher_size tx_size;
struct list_head *rules;
};
@@ -39,7 +45,10 @@ struct mlx5hws_bwc_rule {
struct mlx5hws_rule *rule;
struct mlx5hws_bwc_rule *isolated_bwc_rule;
struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
+ u32 flow_source;
u16 bwc_queue_idx;
+ bool skip_rx;
+ bool skip_tx;
struct list_head list_node;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 91568d6c1dac..2ec8cb10139a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -99,17 +99,19 @@ hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
- seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,-1,-1,%d,%d\n",
MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
HWS_PTR_TO_ID(matcher),
attr->priority,
attr->mode,
- attr->table.sz_row_log,
- attr->table.sz_col_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_row_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_col_log,
attr->optimize_using_rule_idx,
attr->optimize_flow_src,
attr->insert_mode,
- attr->distribute_mode);
+ attr->distribute_mode,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_row_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_col_log);
return 0;
}
@@ -118,7 +120,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
{
enum mlx5hws_table_type tbl_type = matcher->tbl->type;
struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
- struct mlx5hws_pool *ste_pool;
u64 icm_addr_0 = 0;
u64 icm_addr_1 = 0;
u32 ste_0_id = -1;
@@ -133,12 +134,9 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->end_ft_id,
matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
- ste_pool = matcher->match_ste.pool;
- if (ste_pool) {
- ste_0_id = mlx5hws_pool_get_base_id(ste_pool);
- if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
- }
+ ste_0_id = matcher->match_ste.ste_0_base;
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = matcher->match_ste.ste_1_base;
seq_printf(f, ",%d,%d,%d,%d",
matcher->match_ste.rtc_0_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index d45e1145d197..c6436c3a7a83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -727,8 +727,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
- HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
- HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
return -EINVAL;
}
@@ -903,8 +904,9 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
- HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
- HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
return -EINVAL;
}
@@ -1279,7 +1281,8 @@ hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
struct mlx5hws_definer_fc *curr_fc;
if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
- HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ misc_parameters_2.ipsec_next_header, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index bf4643d0ce17..57592b92e24b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -571,14 +571,12 @@ static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_action_dest_attr *dests,
- u32 num_of_dests, bool ignore_flow_level,
- u32 flow_source)
+ u32 num_of_dests, bool ignore_flow_level)
{
u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
- ignore_flow_level,
- flow_source, flags);
+ ignore_flow_level, flags);
}
static struct mlx5hws_action *
@@ -1015,7 +1013,6 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
(*ractions)[num_actions++].action = dest_actions->dest;
} else if (num_dest_actions > 1) {
- u32 flow_source = fte->act_dests.flow_context.flow_source;
bool ignore_flow_level;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
@@ -1025,10 +1022,10 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
ignore_flow_level =
!!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
- tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
- num_dest_actions,
- ignore_flow_level,
- flow_source);
+ tmp_action =
+ mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions,
+ ignore_flow_level);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index ce28ee1c0e41..f3ea09caba2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -468,12 +468,16 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
struct mlx5hws_match_template *mt = matcher->mt;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
struct mlx5hws_table *tbl = matcher->tbl;
u32 obj_id;
int ret;
- rtc_attr.log_size = attr->table.sz_row_log;
- rtc_attr.log_depth = attr->table.sz_col_log;
+ size_rx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+
+ rtc_attr.log_size = size_rx->table.sz_row_log;
+ rtc_attr.log_depth = size_rx->table.sz_col_log;
rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
rtc_attr.is_scnd_range = 0;
rtc_attr.miss_ft_id = matcher->end_ft_id;
@@ -507,10 +511,8 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
}
}
- obj_id = mlx5hws_pool_get_base_id(matcher->match_ste.pool);
-
rtc_attr.pd = ctx->pd_num;
- rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_base = matcher->match_ste.ste_0_base;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false);
@@ -527,9 +529,9 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
}
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_get_base_mirror_id(
- matcher->match_ste.pool);
- rtc_attr.ste_base = obj_id;
+ rtc_attr.log_size = size_tx->table.sz_row_log;
+ rtc_attr.log_depth = size_tx->table.sz_col_log;
+ rtc_attr.ste_base = matcher->match_ste.ste_1_base;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
@@ -566,43 +568,38 @@ hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size;
+ int i;
- if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
- mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
- caps->rtc_log_depth_max);
- return -EOPNOTSUPP;
- }
+ for (i = 0; i < 2; i++) {
+ size = &attr->size[i];
- if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
- mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
- caps->ste_alloc_log_max);
- return -EOPNOTSUPP;
- }
+ if (size->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
- if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
- mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
- caps->ste_alloc_log_gran);
- return -EOPNOTSUPP;
+ if (size->table.sz_col_log + size->table.sz_row_log >
+ caps->ste_alloc_log_max) {
+ mlx5hws_err(ctx,
+ "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (size->table.sz_col_log + size->table.sz_row_log <
+ caps->ste_alloc_log_gran) {
+ mlx5hws_err(ctx, "Total matcher size below limit %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
}
return 0;
}
-static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
- struct mlx5hws_matcher *matcher)
-{
- switch (matcher->attr.optimize_flow_src) {
- case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
- attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
- break;
- case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
- attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
- break;
- default:
- break;
- }
-}
-
static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
struct mlx5hws_action_template *at)
{
@@ -683,8 +680,9 @@ static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher)
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
{
+ struct mlx5hws_cmd_ste_create_attr ste_attr = {};
struct mlx5hws_context *ctx = matcher->tbl->ctx;
- struct mlx5hws_pool_attr pool_attr = {0};
+ union mlx5hws_matcher_size *size;
int ret;
/* Calculate match, range and hash definers */
@@ -699,22 +697,39 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
hws_matcher_set_ip_version_match(matcher);
- /* Create an STE pool per matcher*/
- pool_attr.table_type = matcher->tbl->type;
- pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
- matcher->attr.table.sz_row_log;
- hws_matcher_set_pool_attr(&pool_attr, matcher);
-
- matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
- if (!matcher->match_ste.pool) {
- mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
- ret = -EOPNOTSUPP;
+ /* Create an STE range each for RX and TX. */
+ ste_attr.table_type = FS_FT_FDB_RX;
+ size = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ ste_attr.log_obj_range =
+ matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT ?
+ 0 : size->table.sz_col_log + size->table.sz_row_log;
+
+ ret = mlx5hws_cmd_ste_create(ctx->mdev, &ste_attr,
+ &matcher->match_ste.ste_0_base);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate RX STE range (%d)\n", ret);
goto uninit_match_definer;
}
+ ste_attr.table_type = FS_FT_FDB_TX;
+ size = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+ ste_attr.log_obj_range =
+ matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE ?
+ 0 : size->table.sz_col_log + size->table.sz_row_log;
+
+ ret = mlx5hws_cmd_ste_create(ctx->mdev, &ste_attr,
+ &matcher->match_ste.ste_1_base);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate TX STE range (%d)\n", ret);
+ goto destroy_rx_ste_range;
+ }
+
return 0;
+destroy_rx_ste_range:
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_0_base);
uninit_match_definer:
if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
mlx5hws_definer_mt_uninit(ctx, matcher->mt);
@@ -723,9 +738,12 @@ uninit_match_definer:
static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
{
- mlx5hws_pool_destroy(matcher->match_ste.pool);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_1_base);
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_0_base);
if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
- mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
}
static int
@@ -734,6 +752,10 @@ hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+
+ size_rx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
switch (attr->insert_mode) {
case MLX5HWS_MATCHER_INSERT_BY_HASH:
@@ -744,7 +766,7 @@ hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
break;
case MLX5HWS_MATCHER_INSERT_BY_INDEX:
- if (attr->table.sz_col_log) {
+ if (size_rx->table.sz_col_log || size_tx->table.sz_col_log) {
mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
return -EOPNOTSUPP;
}
@@ -764,7 +786,10 @@ hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
return -EOPNOTSUPP;
}
- if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+ if (size_rx->table.sz_row_log >
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX ||
+ size_tx->table.sz_row_log >
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
return -EOPNOTSUPP;
@@ -788,6 +813,10 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+
+ size_rx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
if (hws_matcher_validate_insert_mode(caps, matcher))
return -EOPNOTSUPP;
@@ -799,8 +828,12 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
/* Convert number of rules to the required depth */
if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
- attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
- attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ size_rx->table.sz_col_log =
+ hws_matcher_rules_to_tbl_depth(size_rx->rule.num_log);
+ size_tx->table.sz_col_log =
+ hws_matcher_rules_to_tbl_depth(size_tx->rule.num_log);
+ }
matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
matcher->flags |= attr->isolated_matcher_end_ft_id ?
@@ -861,14 +894,19 @@ static int
hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
struct mlx5hws_matcher *col_matcher;
- int ret;
+ int i, ret;
+
+ size_rx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
return 0;
- if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+ if (!hws_matcher_requires_col_tbl(size_rx->rule.num_log) &&
+ !hws_matcher_requires_col_tbl(size_tx->rule.num_log))
return 0;
col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
@@ -885,10 +923,16 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
- col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
- col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
- if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
- col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+ for (i = 0; i < 2; i++) {
+ union mlx5hws_matcher_size *dst = &col_matcher->attr.size[i];
+ union mlx5hws_matcher_size *src = &matcher->attr.size[i];
+
+ dst->table.sz_row_log = src->rule.num_log;
+ dst->table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (dst->table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ dst->table.sz_row_log -=
+ MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+ }
col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
col_matcher->attr.isolated_matcher_end_ft_id =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 32e83cddcd60..ae20bcebfdde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -48,7 +48,8 @@ struct mlx5hws_match_template {
struct mlx5hws_matcher_match_ste {
u32 rtc_0_id;
u32 rtc_1_id;
- struct mlx5hws_pool *pool;
+ u32 ste_0_base;
+ u32 ste_1_base;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index d8ac6c196211..59c14745ed0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -93,6 +93,23 @@ enum mlx5hws_matcher_distribute_mode {
MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
};
+enum mlx5hws_matcher_size_type {
+ MLX5HWS_MATCHER_SIZE_TYPE_RX,
+ MLX5HWS_MATCHER_SIZE_TYPE_TX,
+ MLX5HWS_MATCHER_SIZE_TYPE_MAX,
+};
+
+union mlx5hws_matcher_size {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+};
+
struct mlx5hws_matcher_attr {
/* Processing priority inside table */
u32 priority;
@@ -107,16 +124,7 @@ struct mlx5hws_matcher_attr {
enum mlx5hws_matcher_distribute_mode distribute_mode;
/* Define whether the created matcher supports resizing into a bigger matcher */
bool resizable;
- union {
- struct {
- u8 sz_row_log;
- u8 sz_col_log;
- } table;
-
- struct {
- u8 num_log;
- } rule;
- };
+ union mlx5hws_matcher_size size[MLX5HWS_MATCHER_SIZE_TYPE_MAX];
/* Optional AT attach configuration - Max number of additional AT */
u8 max_num_of_at_attach;
/* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
@@ -727,18 +735,14 @@ mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
* @dests: The destination array. Each contains a destination action and can
* have additional actions.
* @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
- * @flow_source: Source port of the traffic for this actions.
* @flags: Action creation flags (enum mlx5hws_action_flags).
*
* Return: pointer to mlx5hws_action on success NULL otherwise.
*/
struct mlx5hws_action *
-mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
- size_t num_dest,
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level,
- u32 flow_source,
- u32 flags);
+ bool ignore_flow_level, u32 flags);
/**
* mlx5hws_action_create_insert_header - Create insert header action.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index 5342a4cc7194..a94f094e72ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -3,10 +3,8 @@
#include "internal.h"
-static void hws_rule_skip(struct mlx5hws_matcher *matcher,
- struct mlx5hws_match_template *mt,
- u32 flow_source,
- bool *skip_rx, bool *skip_tx)
+void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
{
/* By default FDB rules are added to both RX and TX */
*skip_rx = false;
@@ -14,20 +12,21 @@ static void hws_rule_skip(struct mlx5hws_matcher *matcher,
if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
*skip_rx = true;
- } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ return;
+ }
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
*skip_tx = true;
- } else {
- /* If no flow source was set for current rule,
- * check for flow source in matcher attributes.
- */
- if (matcher->attr.optimize_flow_src) {
- *skip_tx =
- matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
- *skip_rx =
- matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
- return;
- }
+ return;
}
+
+ /* If no flow source was set for the current rule,
+ * check for a flow source in the matcher attributes.
+ */
+ *skip_tx = matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx = matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT;
}
static void
@@ -66,7 +65,8 @@ static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
attr->rule_idx : 0;
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+ mlx5hws_rule_skip(matcher, attr->flow_source,
+ &skip_rx, &skip_tx);
if (!skip_rx) {
dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 1c47a9c11572..d0f082b8dbf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -69,6 +69,9 @@ struct mlx5hws_rule {
*/
};
+void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
+ bool *skip_rx, bool *skip_tx);
+
void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 740b719e7072..2f0316616fa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -378,6 +378,9 @@ err_create_cq:
mlx5_free_bfreg(mdev, &sq->bfreg);
err_alloc_bfreg:
kfree(sq);
+
+ if (mdev->wc_state == MLX5_WC_STATE_UNSUPPORTED)
+ mlx5_core_warn(mdev, "Write combining is not supported\n");
}
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index d76d7a945899..d1f8a72cae53 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -142,8 +142,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: clean_port failed: %pe\n", ERR_PTR(err));
return err;
+ }
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -154,19 +156,25 @@ static int mlxbf_gige_open(struct net_device *netdev)
phy_start(phydev);
err = mlxbf_gige_tx_init(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: tx_init failed: %pe\n", ERR_PTR(err));
goto phy_deinit;
+ }
err = mlxbf_gige_rx_init(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: rx_init failed: %pe\n", ERR_PTR(err));
goto tx_deinit;
+ }
netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
err = mlxbf_gige_request_irqs(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: request_irqs failed: %pe\n", ERR_PTR(err));
goto napi_deinit;
+ }
mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
@@ -418,8 +426,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "probe: mdio_probe failed: %pe\n", ERR_PTR(err));
return err;
+ }
priv->base = base;
priv->llu_base = llu_base;
@@ -438,7 +448,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(&pdev->dev, "DMA configuration failed: %pe\n", ERR_PTR(err));
goto out;
}
@@ -468,7 +478,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
- dev_err(&pdev->dev, "Could not attach to PHY\n");
+ dev_err(&pdev->dev, "Could not attach to PHY: %pe\n", ERR_PTR(err));
goto out;
}
@@ -479,7 +489,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "Failed to register netdev\n");
+ dev_err(&pdev->dev, "Failed to register netdev: %pe\n", ERR_PTR(err));
phy_disconnect(phydev);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 058dcabfaa2e..8769cba2c746 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -156,7 +156,7 @@ static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
}
strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
sizeof(mlxsw_pci->napi_dev_rx->name));
- dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
+ netif_threaded_enable(mlxsw_pci->napi_dev_rx);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index e8182dd76c7d..5b9f0844b8f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -131,7 +131,7 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
return (u64) frc_l | (u64) frc_h2 << 32;
}
-static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
+static u64 mlxsw_sp1_ptp_read_frc(struct cyclecounter *cc)
{
struct mlxsw_sp1_ptp_clock *clock =
container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 0dbc634adb4b..15e8ff649615 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -12,6 +12,7 @@ fbnic-y := fbnic_csr.o \
fbnic_devlink.o \
fbnic_ethtool.o \
fbnic_fw.o \
+ fbnic_fw_log.o \
fbnic_hw_stats.o \
fbnic_hwmon.o \
fbnic_irq.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 65815d4f379e..c376e06880c9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -12,6 +12,7 @@
#include "fbnic_csr.h"
#include "fbnic_fw.h"
+#include "fbnic_fw_log.h"
#include "fbnic_hw_stats.h"
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
@@ -85,6 +86,8 @@ struct fbnic_dev {
/* Lock protecting access to hw_stats */
spinlock_t hw_stats_lock;
+
+ struct fbnic_fw_log fw_log;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 36393a17d92d..a81db842aa53 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -12,13 +12,28 @@
#define DESC_BIT(nr) BIT_ULL(nr)
#define DESC_GENMASK(h, l) GENMASK_ULL(h, l)
+#define FW_VER_CODE(_major, _minor, _patch, _build) ( \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_MAJOR, _major) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_MINOR, _minor) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_PATCH, _patch) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_BUILD, _build))
+
/* Defines the minimum firmware version required by the driver */
-#define MIN_FW_MAJOR_VERSION 0
-#define MIN_FW_MINOR_VERSION 10
-#define MIN_FW_BUILD_VERSION 6
-#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \
- MIN_FW_MINOR_VERSION * (1u << 16) + \
- MIN_FW_BUILD_VERSION)
+#define MIN_FW_VER_CODE FW_VER_CODE(0, 10, 6, 0)
+
+/* Defines the minimum firmware version required for firmware logs */
+#define MIN_FW_VER_CODE_LOG FW_VER_CODE(0, 12, 9, 0)
+
+/* The driver can request that firmware send all cached logs in bulk.
+ * Older firmware also supported this feature, but had a bug that made
+ * it attempt to send 30 messages per mailbox message, which overflowed
+ * and flooded the mailbox. This results in a kernel warning about
+ * corrupt mailbox messages.
+ *
+ * If the firmware is new enough, only request the historical logs when
+ * the log buffer is empty, to prevent duplicate logs.
+ */
+#define MIN_FW_VER_CODE_HIST FW_VER_CODE(25, 5, 7, 0)
#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013
@@ -446,6 +461,26 @@ enum {
#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_L \
+ 0x04433 /* 0x110cc */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_H \
+ 0x04434 /* 0x110d0 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_L \
+ 0x04435 /* 0x110d4 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_H \
+ 0x04436 /* 0x110d8 */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_L \
+ 0x04437 /* 0x110dc */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_H \
+ 0x04438 /* 0x110e0 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_L \
+ 0x04439 /* 0x110e4 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_H \
+ 0x0443a /* 0x110e8 */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_L \
+ 0x0443b /* 0x110ec */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_H \
+ 0x0443c /* 0x110f0 */
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
/* Precision Time Protocol Registers */
@@ -473,7 +508,7 @@ enum {
#define FBNIC_PTP_ADD_VAL_NS 0x04806 /* 0x12018 */
#define FBNIC_PTP_ADD_VAL_NS_MASK CSR_GENMASK(15, 0)
-#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
+#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
#define FBNIC_PTP_CTR_VAL_HI 0x04808 /* 0x12020 */
#define FBNIC_PTP_CTR_VAL_LO 0x04809 /* 0x12024 */
@@ -674,6 +709,26 @@ enum {
#define FBNIC_RPC_CNTR_OVR_SIZE_ERR 0x084a6 /* 0x21298 */
#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_L \
+ 0x0855f /* 0x2157c */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_H \
+ 0x08560 /* 0x21580 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_L \
+ 0x08561 /* 0x21584 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_H \
+ 0x08562 /* 0x21588 */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_L \
+ 0x08563 /* 0x2158c */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_H \
+ 0x08564 /* 0x21590 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_L \
+ 0x08565 /* 0x21594 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_H \
+ 0x08566 /* 0x21598 */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_L \
+ 0x08567 /* 0x2159c */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_H \
+ 0x08568 /* 0x215a0 */
#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
/* RPC RAM Registers */
@@ -776,16 +831,12 @@ enum {
#define FBNIC_CSR_START_MAC_STAT 0x11a00
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
-#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L \
- 0x11a0a /* 0x46828 */
-#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H \
- 0x11a0b /* 0x4682c */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L 0x11a0a /* 0x46828 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H 0x11a0b /* 0x4682c */
#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */
#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */
-#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L \
- 0x11a12 /* 0x46848 */
-#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H \
- 0x11a13 /* 0x4684c */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L 0x11a12 /* 0x46848 */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H 0x11a13 /* 0x4684c */
#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \
0x11a14 /* 0x46850 */
#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \
@@ -796,20 +847,90 @@ enum {
#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */
#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */
#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_L 0x11a24 /* 0x46890 */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_H 0x11a25 /* 0x46894 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_L \
+ 0x11a26 /* 0x46898 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_H \
+ 0x11a27 /* 0x4689c */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_L \
+ 0x11a28 /* 0x468a0 */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_H \
+ 0x11a29 /* 0x468a4 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_L \
+ 0x11a2a /* 0x468a8 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_H \
+ 0x11a2b /* 0x468ac */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_L \
+ 0x11a2c /* 0x468b0 */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_H \
+ 0x11a2d /* 0x468b4 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_L \
+ 0x11a2e /* 0x468b8 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_H \
+ 0x11a2f /* 0x468bc */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_L \
+ 0x11a30 /* 0x468c0 */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_H \
+ 0x11a31 /* 0x468c4 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_L \
+ 0x11a32 /* 0x468c8 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_H \
+ 0x11a33 /* 0x468cc */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_L 0x11a34 /* 0x468d0 */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_H 0x11a35 /* 0x468d4 */
+#define FBNIC_MAC_STAT_RX_JABBER_L 0x11a36 /* 0x468d8 */
+#define FBNIC_MAC_STAT_RX_JABBER_H 0x11a37 /* 0x468dc */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_L 0x11a38 /* 0x468e0 */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_H 0x11a39 /* 0x468e4 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_L \
+ 0x11a3c /* 0x468f0 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_H \
+ 0x11a3d /* 0x468f4 */
#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
0x11a42 /* 0x46908 */
#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \
0x11a43 /* 0x4690c */
-#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L \
- 0x11a46 /* 0x46918 */
-#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H \
- 0x11a47 /* 0x4691c */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L 0x11a46 /* 0x46918 */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H 0x11a47 /* 0x4691c */
#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_L \
+ 0x11a4e /* 0x46938 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_H \
+ 0x11a4f /* 0x4693c */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_L \
+ 0x11a50 /* 0x46940 */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_H \
+ 0x11a51 /* 0x46944 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_L \
+ 0x11a52 /* 0x46948 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_H \
+ 0x11a53 /* 0x4694c */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_L \
+ 0x11a54 /* 0x46950 */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_H \
+ 0x11a55 /* 0x46954 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_L \
+ 0x11a56 /* 0x46958 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_H \
+ 0x11a57 /* 0x4695c */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_L \
+ 0x11a58 /* 0x46960 */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_H \
+ 0x11a59 /* 0x46964 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_L \
+ 0x11a5a /* 0x46968 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_H \
+ 0x11a5b /* 0x4696c */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_L \
+ 0x11a5e /* 0x46978 */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_H \
+ 0x11a5f /* 0x4697c */
/* PCIE Comphy Registers */
#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
@@ -857,7 +978,7 @@ enum {
0x3107e /* 0xc41f8 */
#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
0x3107f /* 0xc41fc */
-#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index e8f2d7f2d962..b7238dd967fe 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -170,6 +170,33 @@ static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+static int fbnic_dbg_fw_log_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ struct fbnic_fw_log_entry *entry;
+ unsigned long flags;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return -ENXIO;
+
+ spin_lock_irqsave(&fbd->fw_log.lock, flags);
+
+ list_for_each_entry_reverse(entry, &fbd->fw_log.entries, list) {
+ seq_printf(s, FBNIC_FW_LOG_FMT, entry->index,
+ (entry->timestamp / (MSEC_PER_SEC * 60 * 60 * 24)),
+ (entry->timestamp / (MSEC_PER_SEC * 60 * 60)) % 24,
+ ((entry->timestamp / (MSEC_PER_SEC * 60) % 60)),
+ ((entry->timestamp / MSEC_PER_SEC) % 60),
+ (entry->timestamp % MSEC_PER_SEC),
+ entry->msg);
+ }
+
+ spin_unlock_irqrestore(&fbd->fw_log.lock, flags);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_fw_log);
+
static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -222,6 +249,8 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
&fbnic_dbg_ipo_src_fops);
debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_ipo_dst_fops);
+ debugfs_create_file("fw_log", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_fw_log_fops);
}
void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index 4c4938eedd7b..c5f81f139e7e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -182,7 +182,7 @@ fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
else
err = -ETIMEDOUT;
- fbnic_fw_clear_cmpl(fbd, cmpl);
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
cmpl_free:
fbnic_fw_put_cmpl(cmpl);
@@ -300,7 +300,7 @@ err_no_msg:
component_name, 0, 0);
}
- fbnic_fw_clear_cmpl(fbd, cmpl);
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
cmpl_free:
fbnic_fw_put_cmpl(cmpl);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 5c7556c8c4c5..dc7ba8d5fc43 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -531,20 +531,6 @@ static int fbnic_get_rss_hash_idx(u32 flow_type)
return -1;
}
-static int
-fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
-{
- int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
-
- if (hash_opt_idx < 0)
- return -EINVAL;
-
- /* Report options from rss_en table in fbn */
- cmd->data = fbn->rss_flow_hash[hash_opt_idx];
-
- return 0;
-}
-
static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -779,9 +765,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
cmd->data = fbn->num_rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = fbnic_get_rss_hash_opts(fbn, cmd);
- break;
case ETHTOOL_GRXCLSRULE:
ret = fbnic_get_cls_rule(fbn, cmd);
break;
@@ -803,41 +786,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
return ret;
}
-#define FBNIC_L2_HASH_OPTIONS \
- (RXH_L2DA | RXH_DISCARD)
-#define FBNIC_L3_HASH_OPTIONS \
- (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
-#define FBNIC_L4_HASH_OPTIONS \
- (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-
-static int
-fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
-{
- int hash_opt_idx;
-
- /* Verify the type requested is correct */
- hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
- if (hash_opt_idx < 0)
- return -EINVAL;
-
- /* Verify the fields asked for can actually be assigned based on type */
- if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
- (hash_opt_idx > FBNIC_L4_HASH_OPT &&
- cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
- (hash_opt_idx > FBNIC_IP_HASH_OPT &&
- cmd->data & ~FBNIC_L2_HASH_OPTIONS))
- return -EINVAL;
-
- fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
-
- if (netif_running(fbn->netdev)) {
- fbnic_rss_reinit(fbn->fbd, fbn);
- fbnic_write_rules(fbn->fbd);
- }
-
- return 0;
-}
-
static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
{
int i;
@@ -1244,9 +1192,6 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fbnic_set_rss_hash_opts(fbn, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = fbnic_set_cls_rule_ins(fbn, cmd);
break;
@@ -1347,6 +1292,60 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
}
static int
+fbnic_get_rss_hash_opts(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from rss_en table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
+static int
fbnic_modify_rxfh_context(struct net_device *netdev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
@@ -1446,7 +1445,7 @@ static int fbnic_set_channels(struct net_device *netdev,
standalone = ch->rx_count + ch->tx_count;
/* Limits for standalone queues:
- * - each queue has it's own NAPI (num_napi >= rx + tx + combined)
+ * - each queue has its own NAPI (num_napi >= rx + tx + combined)
* - combining queues (combined not 0, rx or tx must be 0)
*/
if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
@@ -1612,35 +1611,107 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
+static void
+fbnic_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
+
+ eth_ctrl_stats->MACControlFramesReceived =
+ mac_stats->eth_ctrl.MACControlFramesReceived.value;
+ eth_ctrl_stats->MACControlFramesTransmitted =
+ mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
+}
+
+static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 9216 },
+ { 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
+ {}
+};
+
+static void
+fbnic_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);
+
+ rmon_stats->undersize_pkts =
+ mac_stats->rmon.undersize_pkts.value;
+ rmon_stats->oversize_pkts =
+ mac_stats->rmon.oversize_pkts.value;
+ rmon_stats->fragments =
+ mac_stats->rmon.fragments.value;
+ rmon_stats->jabbers =
+ mac_stats->rmon.jabbers.value;
+
+ for (i = 0; fbnic_rmon_ranges[i].high; i++) {
+ rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
+ rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
+ }
+
+ *ranges = fbnic_rmon_ranges;
+}
+
static const struct ethtool_ops fbnic_ethtool_ops = {
- .supported_coalesce_params =
- ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_RX_MAX_FRAMES,
- .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
- .get_drvinfo = fbnic_get_drvinfo,
- .get_regs_len = fbnic_get_regs_len,
- .get_regs = fbnic_get_regs,
- .get_coalesce = fbnic_get_coalesce,
- .set_coalesce = fbnic_set_coalesce,
- .get_ringparam = fbnic_get_ringparam,
- .set_ringparam = fbnic_set_ringparam,
- .get_strings = fbnic_get_strings,
- .get_ethtool_stats = fbnic_get_ethtool_stats,
- .get_sset_count = fbnic_get_sset_count,
- .get_rxnfc = fbnic_get_rxnfc,
- .set_rxnfc = fbnic_set_rxnfc,
- .get_rxfh_key_size = fbnic_get_rxfh_key_size,
- .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
- .get_rxfh = fbnic_get_rxfh,
- .set_rxfh = fbnic_set_rxfh,
- .create_rxfh_context = fbnic_create_rxfh_context,
- .modify_rxfh_context = fbnic_modify_rxfh_context,
- .remove_rxfh_context = fbnic_remove_rxfh_context,
- .get_channels = fbnic_get_channels,
- .set_channels = fbnic_set_channels,
- .get_ts_info = fbnic_get_ts_info,
- .get_ts_stats = fbnic_get_ts_stats,
- .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
+ .get_drvinfo = fbnic_get_drvinfo,
+ .get_regs_len = fbnic_get_regs_len,
+ .get_regs = fbnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
+ .get_pauseparam = fbnic_phylink_get_pauseparam,
+ .set_pauseparam = fbnic_phylink_set_pauseparam,
+ .get_strings = fbnic_get_strings,
+ .get_ethtool_stats = fbnic_get_ethtool_stats,
+ .get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_rxfh_fields = fbnic_get_rss_hash_opts,
+ .set_rxfh_fields = fbnic_set_rss_hash_opts,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
+ .get_ts_info = fbnic_get_ts_info,
+ .get_ts_stats = fbnic_get_ts_stats,
+ .get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
+ .get_fecparam = fbnic_phylink_get_fecparam,
+ .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_get_rmon_stats,
};
void fbnic_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 4521d0483d18..0c55be7d2547 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -95,6 +95,9 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
/* Initialize lock to protect Tx ring */
spin_lock_init(&fbd->fw_tx_lock);
+ /* Reset FW Capabilities */
+ memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));
+
/* Reinitialize mailbox memory */
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
@@ -335,6 +338,16 @@ unlock_mbx:
return err;
}
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
static void fbnic_fw_release_cmpl_data(struct kref *kref)
{
struct fbnic_fw_completion *cmpl_data;
@@ -373,11 +386,11 @@ fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
*
* Return:
* One the following values:
- * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
- * -ENODEV: Device I/O error
- * -ENOMEM: Failed to allocate message
- * -EBUSY: No space in mailbox
- * -ENOSPC: DMA mapping failed
+ * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
*
* This function sends a single TLV header indicating the host wants to take
* some action. However there are no other side effects which means that any
@@ -560,16 +573,15 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (!fbd->fw_cap.running.mgmt.version)
return -EINVAL;
- if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE) {
+ char required_ver[FBNIC_FW_VER_MAX_SIZE];
char running_ver[FBNIC_FW_VER_MAX_SIZE];
fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
running_ver);
- dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
- running_ver,
- MIN_FW_MAJOR_VERSION,
- MIN_FW_MINOR_VERSION,
- MIN_FW_BUILD_VERSION);
+ fbnic_mk_fw_ver_str(MIN_FW_VER_CODE, required_ver);
+ dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%s)\n",
+ running_ver, required_ver);
/* Disable TX mailbox to prevent card use until firmware is
* updated.
*/
@@ -1022,6 +1034,169 @@ msg_err:
return err;
}
+static const struct fbnic_tlv_index fbnic_fw_log_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_MSEC),
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_LOG_INDEX),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_LOG_MSG, FBNIC_FW_LOG_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_LENGTH),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSEC_ARRAY),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_INDEX_ARRAY),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSG_ARRAY),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_process_log_array(struct fbnic_tlv_msg **results,
+ u16 length, u16 arr_type_idx,
+ u16 attr_type_idx,
+ struct fbnic_tlv_msg **tlv_array_out)
+{
+ struct fbnic_tlv_msg *attr;
+ int attr_len;
+ int err;
+
+ if (!results[attr_type_idx])
+ return -EINVAL;
+
+ tlv_array_out[0] = results[attr_type_idx];
+
+ if (!length)
+ return 0;
+
+ if (!results[arr_type_idx])
+ return -EINVAL;
+
+ attr = results[arr_type_idx];
+ attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, &tlv_array_out[1],
+ fbnic_fw_log_req_index,
+ attr_type_idx,
+ length);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fbnic_fw_parse_logs(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg **msec_tlv,
+ struct fbnic_tlv_msg **index_tlv,
+ struct fbnic_tlv_msg **log_tlv,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char log[FBNIC_FW_LOG_MAX_SIZE];
+ ssize_t len;
+ u64 index;
+ u32 msec;
+ int err;
+
+ if (!msec_tlv[i] || !index_tlv[i] || !log_tlv[i]) {
+ dev_warn(fbd->dev, "Received log message with missing attributes!\n");
+ return -EINVAL;
+ }
+
+ index = fbnic_tlv_attr_get_signed(index_tlv[i], 0);
+ msec = fbnic_tlv_attr_get_signed(msec_tlv[i], 0);
+ len = fbnic_tlv_attr_get_string(log_tlv[i], log,
+ FBNIC_FW_LOG_MAX_SIZE);
+ if (len < 0)
+ return len;
+
+ err = fbnic_fw_log_write(fbd, index, msec, log);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_fw_parse_log_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_tlv_msg *index_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_tlv_msg *msec_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_tlv_msg *log_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_dev *fbd = opaque;
+ u16 length;
+ int err;
+
+ length = fta_get_uint(results, FBNIC_FW_LOG_LENGTH);
+ if (length >= FBNIC_FW_MAX_LOG_HISTORY)
+ return -E2BIG;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_MSEC_ARRAY,
+ FBNIC_FW_LOG_MSEC, msec_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_INDEX_ARRAY,
+ FBNIC_FW_LOG_INDEX, index_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_MSG_ARRAY,
+ FBNIC_FW_LOG_MSG, log_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_parse_logs(fbd, msec_tlv, index_tlv, log_tlv,
+ length + 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
+ bool send_log_history)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_LOG) {
+ dev_warn(fbd->dev, "Firmware version is too old to support firmware logs!\n");
+ return -EOPNOTSUPP;
+ }
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (enable) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_SEND_LOGS);
+ if (err)
+ goto free_message;
+
+ /* Report request for version 1 of logs */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_SEND_LOGS_VERSION,
+ FBNIC_FW_LOG_VERSION);
+ if (err)
+ goto free_message;
+
+ if (send_log_history) {
+ err = fbnic_tlv_attr_put_flag(msg,
+ FBNIC_SEND_LOGS_HISTORY);
+ if (err)
+ goto free_message;
+ }
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
fbnic_fw_parse_cap_resp),
@@ -1041,6 +1216,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(TSENE_READ_RESP,
fbnic_tsene_read_resp_index,
fbnic_fw_parse_tsene_read_resp),
+ FBNIC_TLV_PARSER(LOG_MSG_REQ,
+ fbnic_fw_log_req_index,
+ fbnic_fw_parse_log_req),
FBNIC_TLV_MSG_ERROR
};
@@ -1117,6 +1295,7 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
unsigned long timeout = jiffies + 10 * HZ + 1;
int err, i;
@@ -1149,8 +1328,23 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
if (err)
goto clean_mbx;
- /* Use "1" to indicate we entered the state waiting for a response */
- fbd->fw_cap.running.mgmt.version = 1;
+ /* Poll until we get a current management firmware version. Use "1" to
+ * indicate that we have entered the polling state and are waiting for a
+ * response.
+ */
+ for (fbd->fw_cap.running.mgmt.version = 1;
+ fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE;) {
+ if (!tx_mbx->ready)
+ err = -ENODEV;
+ if (err)
+ goto clean_mbx;
+
+ msleep(20);
+ fbnic_mbx_poll(fbd);
+
+ /* Set err, but wait until the mgmt.version check to report it */
+ if (!time_is_after_jiffies(timeout))
+ err = -ETIMEDOUT;
+ }
return 0;
clean_mbx:
@@ -1244,16 +1438,6 @@ struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
return cmpl;
}
-void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
- struct fbnic_fw_completion *fw_cmpl)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&fbd->fw_tx_lock, flags);
- fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
- spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
-}
-
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 08bc4b918de7..fde331696fdd 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -19,10 +19,23 @@ struct fbnic_fw_mbx {
};
// FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN
-#define FBNIC_FW_VER_MAX_SIZE 32
+#define FBNIC_FW_VER_MAX_SIZE 32
// Formatted version is in the format XX.YY.ZZ_RRR_COMMIT
#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
-#define FBNIC_FW_LOG_MAX_SIZE 256
+#define FBNIC_FW_LOG_VERSION 1
+#define FBNIC_FW_LOG_MAX_SIZE 256
+/* The maximum number of log messages that can fit in a single mailbox
+ * message. Firmware assumes each mailbox message is 4096B. The number of
+ * messages supported is calculated as 4096 minus the headers for the
+ * message, the arrays, and the length attribute, minus the size of the
+ * length value, all divided by the headers for each array entry plus the
+ * maximum LOG size plus the sizes of MSEC and INDEX. Put another way:
+ *
+ * MAX_LOG_HISTORY = ((4096 - TLV_HDR_SZ * 5 - LENGTH_SZ)
+ * / (FBNIC_FW_LOG_MAX_SIZE + TLV_HDR_SZ * 3 + MSEC_SZ
+ * + INDEX_SZ))
+ */
+#define FBNIC_FW_MAX_LOG_HISTORY 14
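+/* For illustration, with the u32 MSEC/LENGTH and u64 INDEX attributes used
+ * by the log TLVs, and assuming a 4-byte TLV header (header size assumed,
+ * not taken from the firmware interface), the formula above works out to
+ * roughly (4096 - 4 * 5 - 4) / (256 + 4 * 3 + 4 + 8) = 4072 / 280 ~= 14,
+ * matching the value chosen here.
+ */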
struct fbnic_fw_ver {
u32 version;
@@ -66,6 +79,8 @@ void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
@@ -80,9 +95,9 @@ int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
int cancel_error);
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
+int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
+ bool send_log_history);
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
-void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
- struct fbnic_fw_completion *cmpl_data);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
@@ -125,6 +140,9 @@ enum {
FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
+ FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ = 0x43,
+ FBNIC_TLV_MSG_ID_LOG_MSG_REQ = 0x44,
+ FBNIC_TLV_MSG_ID_LOG_MSG_RESP = 0x45,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -155,10 +173,10 @@ enum {
};
enum {
- FBNIC_FW_LINK_SPEED_25R1 = 1,
- FBNIC_FW_LINK_SPEED_50R2 = 2,
- FBNIC_FW_LINK_SPEED_50R1 = 3,
- FBNIC_FW_LINK_SPEED_100R2 = 4,
+ FBNIC_FW_LINK_MODE_25CR = 1,
+ FBNIC_FW_LINK_MODE_50CR2 = 2,
+ FBNIC_FW_LINK_MODE_50CR = 3,
+ FBNIC_FW_LINK_MODE_100CR2 = 4,
};
enum {
@@ -199,4 +217,22 @@ enum {
FBNIC_FW_FINISH_UPGRADE_MSG_MAX
};
+enum {
+ FBNIC_SEND_LOGS = 0x0,
+ FBNIC_SEND_LOGS_VERSION = 0x1,
+ FBNIC_SEND_LOGS_HISTORY = 0x2,
+ FBNIC_SEND_LOGS_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LOG_MSEC = 0x0,
+ FBNIC_FW_LOG_INDEX = 0x1,
+ FBNIC_FW_LOG_MSG = 0x2,
+ FBNIC_FW_LOG_LENGTH = 0x3,
+ FBNIC_FW_LOG_MSEC_ARRAY = 0x4,
+ FBNIC_FW_LOG_INDEX_ARRAY = 0x5,
+ FBNIC_FW_LOG_MSG_ARRAY = 0x6,
+ FBNIC_FW_LOG_MSG_MAX
+};
+
#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
new file mode 100644
index 000000000000..c1663f042245
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include "fbnic.h"
+#include "fbnic_fw.h"
+#include "fbnic_fw_log.h"
+
+void fbnic_fw_log_enable(struct fbnic_dev *fbd, bool send_hist)
+{
+ int err;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_HIST)
+ send_hist = false;
+
+ err = fbnic_fw_xmit_send_logs(fbd, true, send_hist);
+ if (err && err != -EOPNOTSUPP)
+ dev_warn(fbd->dev, "Unable to enable firmware logs: %d\n", err);
+}
+
+void fbnic_fw_log_disable(struct fbnic_dev *fbd)
+{
+ int err;
+
+ err = fbnic_fw_xmit_send_logs(fbd, false, false);
+ if (err && err != -EOPNOTSUPP)
+ dev_warn(fbd->dev, "Unable to disable firmware logs: %d\n",
+ err);
+}
+
+int fbnic_fw_log_init(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_log *log = &fbd->fw_log;
+ void *data;
+
+ if (WARN_ON_ONCE(fbnic_fw_log_ready(fbd)))
+ return -EEXIST;
+
+ data = vmalloc(FBNIC_FW_LOG_SIZE);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&fbd->fw_log.lock);
+ INIT_LIST_HEAD(&log->entries);
+ log->size = FBNIC_FW_LOG_SIZE;
+ log->data_start = data;
+ log->data_end = data + FBNIC_FW_LOG_SIZE;
+
+ fbnic_fw_log_enable(fbd, true);
+
+ return 0;
+}
+
+void fbnic_fw_log_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_log *log = &fbd->fw_log;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return;
+
+ fbnic_fw_log_disable(fbd);
+ INIT_LIST_HEAD(&log->entries);
+ log->size = 0;
+ vfree(log->data_start);
+ log->data_start = NULL;
+ log->data_end = NULL;
+}
+
+int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
+ char *msg)
+{
+ struct fbnic_fw_log_entry *entry, *head, *tail, *next;
+ struct fbnic_fw_log *log = &fbd->fw_log;
+ size_t msg_len = strlen(msg) + 1;
+ unsigned long flags;
+ void *entry_end;
+
+ if (!fbnic_fw_log_ready(fbd)) {
+ dev_err(fbd->dev, "Firmware sent log entry without being requested!\n");
+ return -ENOSPC;
+ }
+
+ spin_lock_irqsave(&log->lock, flags);
+
+ if (list_empty(&log->entries)) {
+ entry = log->data_start;
+ } else {
+ head = list_first_entry(&log->entries, typeof(*head), list);
+ entry_end = head->msg + head->len + 1;
+ entry = PTR_ALIGN(entry_end, 8);
+ }
+
+ entry_end = entry->msg + msg_len + 1;
+
+ /* We've reached the end of the buffer, wrap around */
+ if (entry_end > log->data_end) {
+ entry = log->data_start;
+ entry_end = entry->msg + msg_len + 1;
+ }
+
+ /* Make room for entry by removing from tail. */
+ list_for_each_entry_safe_reverse(tail, next, &log->entries, list) {
+ if (entry <= tail && entry_end > (void *)tail)
+ list_del(&tail->list);
+ else
+ break;
+ }
+
+ entry->index = index;
+ entry->timestamp = timestamp;
+ entry->len = msg_len;
+ strscpy(entry->msg, msg, entry->len);
+ list_add(&entry->list, &log->entries);
+
+ spin_unlock_irqrestore(&log->lock, flags);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
new file mode 100644
index 000000000000..cb6555f40a24
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_LOG_H_
+#define _FBNIC_FW_LOG_H_
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/* A 512K log buffer was chosen fairly arbitrarily */
+#define FBNIC_FW_LOG_SIZE (512 * 1024) /* bytes */
+
+/* Firmware log output is prepended with log index followed by a timestamp.
+ * The timestamp is similar to Zephyr's format DD:HH:MM:SS.MMM
+ */
+#define FBNIC_FW_LOG_FMT "[%5lld] [%02ld:%02ld:%02ld:%02ld.%03ld] %s\n"
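+/* As a hypothetical example, an entry with index 42 logged 1 day, 13 hours,
+ * 37 minutes and 5.250 seconds after firmware start would be rendered as:
+ *
+ *   [   42] [01:13:37:05.250] <message text>
+ */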
+
+struct fbnic_dev;
+
+struct fbnic_fw_log_entry {
+ struct list_head list;
+ u64 index;
+ u32 timestamp;
+ u16 len;
+ char msg[] __counted_by(len);
+};
+
+struct fbnic_fw_log {
+ void *data_start;
+ void *data_end;
+ size_t size;
+ struct list_head entries;
+ /* Spin lock for accessing or modifying entries */
+ spinlock_t lock;
+};
+
+#define fbnic_fw_log_ready(_fbd) (!!(_fbd)->fw_log.data_start)
+
+void fbnic_fw_log_enable(struct fbnic_dev *fbd, bool send_hist);
+void fbnic_fw_log_disable(struct fbnic_dev *fbd);
+int fbnic_fw_log_init(struct fbnic_dev *fbd);
+void fbnic_fw_log_free(struct fbnic_dev *fbd);
+int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
+ char *msg);
+#endif /* _FBNIC_FW_LOG_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 07e54bb75bf3..4fe239717497 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -22,6 +22,23 @@ struct fbnic_hw_stat {
struct fbnic_stat_counter bytes;
};
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_eth_ctrl_stats {
+ struct fbnic_stat_counter MACControlFramesTransmitted;
+ struct fbnic_stat_counter MACControlFramesReceived;
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_rmon_stats {
+ struct fbnic_stat_counter undersize_pkts;
+ struct fbnic_stat_counter oversize_pkts;
+ struct fbnic_stat_counter fragments;
+ struct fbnic_stat_counter jabbers;
+
+ struct fbnic_stat_counter hist[ETHTOOL_RMON_HIST_MAX];
+ struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX];
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -40,6 +57,8 @@ struct fbnic_eth_mac_stats {
struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
+ struct fbnic_eth_ctrl_stats eth_ctrl;
+ struct fbnic_rmon_stats rmon;
};
struct fbnic_tmi_stats {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 10e108c1fcd0..fd8d67f9048e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -452,7 +452,7 @@ static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
/* Disable fault handling if no FEC is requested */
- if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
+ if (fbn->fec == FBNIC_FEC_OFF)
command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
return command_config;
@@ -468,15 +468,15 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return false;
/* Define the expected lane mask for the status bits we need to check */
- switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
- case FBNIC_LINK_100R2:
+ switch (fbn->aui) {
+ case FBNIC_AUI_100GAUI2:
lane_mask = 0xf;
break;
- case FBNIC_LINK_50R1:
+ case FBNIC_AUI_50GAUI1:
lane_mask = 3;
break;
- case FBNIC_LINK_50R2:
- switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_AUI_LAUI2:
+ switch (fbn->fec) {
case FBNIC_FEC_OFF:
lane_mask = 0x63;
break;
@@ -488,13 +488,13 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
break;
}
break;
- case FBNIC_LINK_25R1:
+ case FBNIC_AUI_25GAUI:
lane_mask = 1;
break;
}
/* Use an XOR to remove the bits we expect to see set */
- switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ switch (fbn->fec) {
case FBNIC_FEC_OFF:
lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
pcs_status);
@@ -540,53 +540,41 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
return link;
}
-static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
- u8 link_mode = fbn->link_mode;
- u8 fec = fbn->fec;
-
- /* Update FEC first to reflect FW current mode */
- if (fbn->fec & FBNIC_FEC_AUTO) {
- switch (fbd->fw_cap.link_fec) {
- case FBNIC_FW_LINK_FEC_NONE:
- fec = FBNIC_FEC_OFF;
- break;
- case FBNIC_FW_LINK_FEC_RS:
- fec = FBNIC_FEC_RS;
- break;
- case FBNIC_FW_LINK_FEC_BASER:
- fec = FBNIC_FEC_BASER;
- break;
- default:
- return;
- }
-
- fbn->fec = fec;
+ /* Retrieve default speed from FW */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_MODE_25CR:
+ *aui = FBNIC_AUI_25GAUI;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR2:
+ *aui = FBNIC_AUI_LAUI2;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR:
+ *aui = FBNIC_AUI_50GAUI1;
+ *fec = FBNIC_FEC_RS;
+ return;
+ case FBNIC_FW_LINK_MODE_100CR2:
+ *aui = FBNIC_AUI_100GAUI2;
+ *fec = FBNIC_FEC_RS;
+ return;
+ default:
+ *aui = FBNIC_AUI_UNKNOWN;
+ return;
}
- /* Do nothing if AUTO mode is not engaged */
- if (fbn->link_mode & FBNIC_LINK_AUTO) {
- switch (fbd->fw_cap.link_speed) {
- case FBNIC_FW_LINK_SPEED_25R1:
- link_mode = FBNIC_LINK_25R1;
- break;
- case FBNIC_FW_LINK_SPEED_50R2:
- link_mode = FBNIC_LINK_50R2;
- break;
- case FBNIC_FW_LINK_SPEED_50R1:
- link_mode = FBNIC_LINK_50R1;
- fec = FBNIC_FEC_RS;
- break;
- case FBNIC_FW_LINK_SPEED_100R2:
- link_mode = FBNIC_LINK_100R2;
- fec = FBNIC_FEC_RS;
- break;
- default:
- return;
- }
-
- fbn->link_mode = link_mode;
+ /* Update FEC first to reflect FW current mode */
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ *fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ default:
+ *fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ *fec = FBNIC_FEC_BASER;
+ break;
}
}
@@ -596,9 +584,6 @@ static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
- /* Pull in settings from FW */
- fbnic_pcs_get_fw_settings(fbd);
-
return 0;
}
@@ -680,6 +665,76 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
+static void
+fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesReceived,
+ MAC_STAT_RX_CONTROL_FRAMES);
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesTransmitted,
+ MAC_STAT_TX_CONTROL_FRAMES);
+}
+
+static void
+fbnic_mac_get_rmon_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->undersize_pkts,
+ MAC_STAT_RX_UNDERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->oversize_pkts,
+ MAC_STAT_RX_OVERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->fragments,
+ MAC_STAT_RX_FRAGMENT);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->jabbers,
+ MAC_STAT_RX_JABBER);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[0],
+ MAC_STAT_RX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[1],
+ MAC_STAT_RX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[2],
+ MAC_STAT_RX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[3],
+ MAC_STAT_RX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[4],
+ MAC_STAT_RX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[5],
+ MAC_STAT_RX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[6],
+ RPC_STAT_RX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[7],
+ RPC_STAT_RX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[8],
+ RPC_STAT_RX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[9],
+ RPC_STAT_RX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[10],
+ RPC_STAT_RX_PACKET_9217_MAX_BYTES);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[0],
+ MAC_STAT_TX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[1],
+ MAC_STAT_TX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[2],
+ MAC_STAT_TX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[3],
+ MAC_STAT_TX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[4],
+ MAC_STAT_TX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[5],
+ MAC_STAT_TX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[6],
+ TMI_STAT_TX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[7],
+ TMI_STAT_TX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[8],
+ TMI_STAT_TX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[9],
+ TMI_STAT_TX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[10],
+ TMI_STAT_TX_PACKET_9217_MAX_BYTES);
+}
+
static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
long *val)
{
@@ -741,7 +796,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
*val = *sensor;
exit_cleanup:
- fbnic_fw_clear_cmpl(fbd, fw_cmpl);
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
exit_free:
fbnic_fw_put_cmpl(fw_cmpl);
@@ -755,6 +810,8 @@ static const struct fbnic_mac fbnic_mac_asic = {
.pcs_get_link = fbnic_pcs_get_link_asic,
.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
+ .get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_mac_get_rmon_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
.get_sensor = fbnic_mac_get_sensor_asic,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 05a591653e09..86fa06da2b3e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -25,27 +25,23 @@ enum {
FBNIC_FEC_OFF = 0,
FBNIC_FEC_RS = 1,
FBNIC_FEC_BASER = 2,
- FBNIC_FEC_AUTO = 4,
};
-#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1)
-
-/* Treat the link modes as a set of modulation/lanes bitmask:
+/* Treat the AUI modes as a modulation/lanes bitmask:
* Bit 0: Lane Count, 0 = R1, 1 = R2
* Bit 1: Modulation, 0 = NRZ, 1 = PAM4
- * Bit 2: Retrieve link mode from FW
+ * Bit 2: Unknown Modulation/Lane Configuration
*/
enum {
- FBNIC_LINK_25R1 = 0,
- FBNIC_LINK_50R2 = 1,
- FBNIC_LINK_50R1 = 2,
- FBNIC_LINK_100R2 = 3,
- FBNIC_LINK_AUTO = 4,
+ FBNIC_AUI_25GAUI = 0, /* 25.7812GBd 25.78125 * 1 */
+ FBNIC_AUI_LAUI2 = 1, /* 51.5625GBd 25.78125 * 2 */
+ FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */
+ FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */
+ FBNIC_AUI_UNKNOWN = 4,
};
-#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2)
-#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
-#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+#define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2)
+#define FBNIC_AUI_MODE_PAM4 (FBNIC_AUI_50GAUI1)
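+/* For example, FBNIC_AUI_100GAUI2 (3) has both the lane-count and
+ * modulation bits set, so it matches FBNIC_AUI_MODE_R2 and
+ * FBNIC_AUI_MODE_PAM4 under a bitwise AND, while FBNIC_AUI_25GAUI (0)
+ * matches neither.
+ */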
enum fbnic_sensor_id {
FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
@@ -85,6 +81,10 @@ struct fbnic_mac {
void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats);
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
@@ -93,4 +93,5 @@ struct fbnic_mac {
};
int fbnic_mac_init(struct fbnic_dev *fbd);
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec);
#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index aa812c63d5af..7bd7812d9c06 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -736,8 +736,6 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
*/
netdev->ethtool->wol_enabled = true;
- fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
- fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 561837e80ec8..86576ae04262 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -4,8 +4,8 @@
#ifndef _FBNIC_NETDEV_H_
#define _FBNIC_NETDEV_H_
-#include <linux/types.h>
#include <linux/phylink.h>
+#include <linux/types.h>
#include "fbnic_csr.h"
#include "fbnic_rpc.h"
@@ -42,9 +42,8 @@ struct fbnic_net {
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
- /* TBD: Remove these when phylink supports FEC and lane config */
+ u8 aui;
u8 fec;
- u8 link_mode;
/* Cached top bits of the HW time counter for 40b -> 64b conversion */
u32 time_high;
@@ -67,7 +66,7 @@ struct fbnic_net {
struct fbnic_queue_stats rx_stats;
u64 link_down_events;
- /* Time stampinn filter config */
+ /* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
};
@@ -82,6 +81,7 @@ int fbnic_netdev_register(struct net_device *netdev);
void fbnic_netdev_unregister(struct net_device *netdev);
void fbnic_reset_queues(struct fbnic_net *fbn,
unsigned int tx, unsigned int rx);
+
void fbnic_set_ethtool_ops(struct net_device *dev);
int fbnic_ptp_setup(struct fbnic_dev *fbd);
@@ -93,5 +93,13 @@ void fbnic_time_stop(struct fbnic_net *fbn);
void __fbnic_set_rx_mode(struct net_device *netdev);
void fbnic_clear_rx_mode(struct net_device *netdev);
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam);
int fbnic_phylink_init(struct net_device *netdev);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 249d3ef862d5..b70e4cadb37b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -291,6 +291,17 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto free_irqs;
}
+ /* Send the request to enable FW logging to the host. Note that if this
+ * fails we ignore the error and just display a message, as it is
+ * possible the FW is simply too old to support logging and needs
+ * to be updated.
+ */
+ err = fbnic_fw_log_init(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Unable to initialize firmware log buffer: %d\n",
+ err);
+
fbnic_devlink_register(fbd);
fbnic_dbg_fbd_init(fbd);
spin_lock_init(&fbd->hw_stats_lock);
@@ -365,6 +376,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
+ fbnic_fw_log_free(fbd);
fbnic_fw_free_mbx(fbd);
fbnic_free_irqs(fbd);
@@ -389,6 +401,8 @@ static int fbnic_pm_suspend(struct device *dev)
rtnl_unlock();
null_uc_addr:
+ fbnic_fw_log_disable(fbd);
+
devl_lock(priv_to_devlink(fbd));
fbnic_fw_free_mbx(fbd);
@@ -434,6 +448,11 @@ static int __fbnic_pm_resume(struct device *dev)
devl_unlock(priv_to_devlink(fbd));
+ /* Only send log history if log buffer is empty to prevent duplicate
+ * log entries.
+ */
+ fbnic_fw_log_enable(fbd, list_empty(&fbd->fw_log.entries));
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
@@ -455,6 +474,8 @@ static int __fbnic_pm_resume(struct device *dev)
return 0;
err_free_mbx:
+ fbnic_fw_log_disable(fbd);
+
rtnl_unlock();
fbnic_fw_free_mbx(fbd);
err_free_irqs:
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 860b02b22c15..7ce3fdd25282 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -8,6 +8,99 @@
#include "fbnic_mac.h"
#include "fbnic_netdev.h"
+static phy_interface_t fbnic_phylink_select_interface(u8 aui)
+{
+ switch (aui) {
+ case FBNIC_AUI_100GAUI2:
+ return PHY_INTERFACE_MODE_100GBASEP;
+ case FBNIC_AUI_50GAUI1:
+ return PHY_INTERFACE_MODE_50GBASER;
+ case FBNIC_AUI_LAUI2:
+ return PHY_INTERFACE_MODE_LAUI;
+ case FBNIC_AUI_25GAUI:
+ return PHY_INTERFACE_MODE_25GBASER;
+ }
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ phylink_ethtool_get_pauseparam(fbn->phylink, pause);
+}
+
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return phylink_ethtool_set_pauseparam(fbn->phylink, pause);
+}
+
+static void
+fbnic_phylink_get_supported_fec_modes(unsigned long *supported)
+{
+ /* The NIC can support up to 8 possible combinations.
+ * Either 50G-CR, or 100G-CR2
+ * This is with RS FEC mode only
+ * Either 25G-CR, or 50G-CR2
+ * This is with No FEC, RS, or Base-R
+ */
+ if (phylink_test(supported, 100000baseCR2_Full) ||
+ phylink_test(supported, 50000baseCR_Full))
+ phylink_set(supported, FEC_RS);
+ if (phylink_test(supported, 50000baseCR2_Full) ||
+ phylink_test(supported, 25000baseCR_Full)) {
+ phylink_set(supported, FEC_BASER);
+ phylink_set(supported, FEC_NONE);
+ phylink_set(supported, FEC_RS);
+ }
+}
+
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = phylink_ethtool_ksettings_get(fbn->phylink, cmd);
+ if (!err) {
+ unsigned long *supp = cmd->link_modes.supported;
+
+ cmd->base.port = PORT_DA;
+ cmd->lanes = (fbn->aui & FBNIC_AUI_MODE_R2) ? 2 : 1;
+
+ fbnic_phylink_get_supported_fec_modes(supp);
+ }
+
+ return err;
+}
+
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->fec & FBNIC_FEC_RS) {
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ fecparam->fec = ETHTOOL_FEC_RS;
+ } else if (fbn->fec & FBNIC_FEC_BASER) {
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ fecparam->fec = ETHTOOL_FEC_BASER;
+ } else {
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+ fecparam->fec = ETHTOOL_FEC_OFF;
+ }
+
+ if (fbn->aui & FBNIC_AUI_MODE_PAM4)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
static struct fbnic_net *
fbnic_pcs_to_net(struct phylink_pcs *pcs)
{
@@ -21,23 +114,20 @@ fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
struct fbnic_dev *fbd = fbn->fbd;
- /* For now we use hard-coded defaults and FW config to determine
- * the current values. In future patches we will add support for
- * reconfiguring these values and changing link settings.
- */
- switch (fbd->fw_cap.link_speed) {
- case FBNIC_FW_LINK_SPEED_25R1:
+ switch (fbn->aui) {
+ case FBNIC_AUI_25GAUI:
state->speed = SPEED_25000;
break;
- case FBNIC_FW_LINK_SPEED_50R2:
+ case FBNIC_AUI_LAUI2:
+ case FBNIC_AUI_50GAUI1:
state->speed = SPEED_50000;
break;
- case FBNIC_FW_LINK_SPEED_100R2:
+ case FBNIC_AUI_100GAUI2:
state->speed = SPEED_100000;
break;
default:
- state->speed = SPEED_UNKNOWN;
- break;
+ state->link = 0;
+ return;
}
state->duplex = DUPLEX_FULL;
@@ -131,6 +221,7 @@ static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
int fbnic_phylink_init(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
struct phylink *phylink;
fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
@@ -138,18 +229,23 @@ int fbnic_phylink_init(struct net_device *netdev)
fbn->phylink_config.dev = &netdev->dev;
fbn->phylink_config.type = PHYLINK_NETDEV;
fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
+ MAC_25000FD | MAC_50000FD |
MAC_100000FD;
fbn->phylink_config.default_an_inband = true;
- __set_bit(PHY_INTERFACE_MODE_XGMII,
+ __set_bit(PHY_INTERFACE_MODE_100GBASEP,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_50GBASER,
fbn->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XLGMII,
+ __set_bit(PHY_INTERFACE_MODE_LAUI,
fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ fbn->phylink_config.supported_interfaces);
+
+ fbnic_mac_get_fw_settings(fbd, &fbn->aui, &fbn->fec);
phylink = phylink_create(&fbn->phylink_config, NULL,
- PHY_INTERFACE_MODE_XLGMII,
+ fbnic_phylink_select_interface(fbn->aui),
&fbnic_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index f46616af41ea..2e361d6f03ff 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -141,9 +141,6 @@ struct fbnic_napi_vector {
struct fbnic_q_triad qt[];
};
-#define FBNIC_MAX_TXQS 128u
-#define FBNIC_MAX_RXQS 128u
-
netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index c7b0b09c2b09..541c41a9077a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -335,7 +335,7 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* When running in DMA Mode the RX interrupt is not enabled in
timberdale because RX data is received by DMA callbacks
it must still be enabled in the KS8842 because it indicates
- to timberdale when there is RX data for it's DMA FIFOs */
+ to timberdale when there is RX data for its DMA FIFOs */
iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
} else {
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 64a3b953cc17..40002d9fe274 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -913,23 +913,29 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *fields)
+{
+ fields->data = 0;
+
+ switch (fields->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fields->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ fields->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+
+ return 0;
+}
+
static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- rxnfc->data = 0;
- switch (rxnfc->flow_type) {
- case TCP_V4_FLOW:case UDP_V4_FLOW:
- case TCP_V6_FLOW:case UDP_V6_FLOW:
- rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case IPV4_FLOW: case IPV6_FLOW:
- rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
- return 0;
- }
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = LAN743X_USED_RX_CHANNELS;
return 0;
@@ -1368,6 +1374,7 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
.get_rxfh = lan743x_ethtool_get_rxfh,
.set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_rxfh_fields = lan743x_ethtool_get_rxfh_fields,
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 901fbffbf718..3f36ee6a8ece 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -22,6 +22,7 @@ config MICROSOFT_MANA
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
+ select NET_SHAPER
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 3504507477c6..43f034e180c4 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,8 +6,12 @@
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
+#include <linux/export.h>
#include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
struct dentry *mana_debugfs_root;
@@ -31,6 +35,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
@@ -63,6 +70,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
mana_gd_init_vf_regs(pdev);
}
+/* Suppress logging when we set timeout to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+ struct hw_channel_context *hwc;
+
+ if (err != -ETIMEDOUT)
+ return true;
+
+ if (!gc)
+ return true;
+
+ hwc = gc->hwc.driver_data;
+ if (hwc && hwc->hwc_timeout == 0)
+ return false;
+
+ return true;
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -80,8 +105,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- if (gc->num_msix_usable > resp.max_msix)
- gc->num_msix_usable = resp.max_msix;
+ if (!pci_msix_can_alloc_dyn(pdev)) {
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+ } else {
+ /* If dynamic allocation is enabled we have already allocated
+ * hwc msi
+ */
+ gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
+ }
if (gc->num_msix_usable <= 1)
return -ENOSPC;
@@ -266,8 +298,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
- resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
return err ? err : -EPROTO;
}
@@ -352,11 +385,113 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
}
EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
+#define MANA_SERVICE_PERIOD 10
+
+static void mana_serv_fpga(struct pci_dev *pdev)
+{
+ struct pci_bus *bus, *parent;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ if (!bus) {
+ dev_err(&pdev->dev, "MANA service: no bus\n");
+ goto out;
+ }
+
+ parent = bus->parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(bus->self);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+}
+
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct hw_channel_context *hwc;
+
+ if (!gc) {
+ dev_err(&pdev->dev, "MANA service: no GC\n");
+ return;
+ }
+
+ hwc = gc->hwc.driver_data;
+ if (!hwc) {
+ dev_err(&pdev->dev, "MANA service: no HWC\n");
+ goto out;
+ }
+
+ /* HWC is not responding in this case, so don't wait */
+ hwc->hwc_timeout = 0;
+
+ dev_info(&pdev->dev, "MANA reset cycle start\n");
+
+ mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ mana_gd_resume(pdev);
+
+ dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+ gc->in_service = false;
+}
+
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
+static void mana_serv_func(struct work_struct *w)
+{
+ struct mana_serv_work *mns_wk;
+ struct pci_dev *pdev;
+
+ mns_wk = container_of(w, struct mana_serv_work, serv_work);
+ pdev = mns_wk->pdev;
+
+ if (!pdev)
+ goto out;
+
+ switch (mns_wk->type) {
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ mana_serv_fpga(pdev);
+ break;
+
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ mana_serv_reset(pdev);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "MANA service: unknown type %d\n",
+ mns_wk->type);
+ break;
+ }
+
+out:
+ pci_dev_put(pdev);
+ kfree(mns_wk);
+ module_put(THIS_MODULE);
+}
+
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
@@ -401,6 +536,35 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
eq->eq.callback(eq->eq.context, eq, &event);
break;
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ dev_info(gc->dev, "Recv MANA service type:%d\n", type);
+
+ if (gc->in_service) {
+ dev_info(gc->dev, "Already in service\n");
+ break;
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_info(gc->dev, "Module is unloading\n");
+ break;
+ }
+
+ mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+ if (!mns_wk) {
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ dev_info(gc->dev, "Start MANA service type:%d\n", type);
+ gc->in_service = true;
+ mns_wk->pdev = to_pci_dev(gc->dev);
+ mns_wk->type = type;
+ pci_dev_get(mns_wk->pdev);
+ INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+ schedule_work(&mns_wk->serv_work);
+ break;
+
default:
break;
}
@@ -483,7 +647,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
}
queue->eq.msix_index = msi_index;
- gic = &gc->irq_contexts[msi_index];
+ gic = xa_load(&gc->irq_contexts, msi_index);
+ if (WARN_ON(!gic))
+ return -EINVAL;
spin_lock_irqsave(&gic->lock, flags);
list_add_rcu(&queue->entry, &gic->eq_list);
@@ -492,7 +658,7 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
return 0;
}
-static void mana_gd_deregiser_irq(struct gdma_queue *queue)
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
@@ -508,7 +674,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;
- gic = &gc->irq_contexts[msix_index];
+ gic = xa_load(&gc->irq_contexts, msix_index);
+ if (WARN_ON(!gic))
+ return;
+
spin_lock_irqsave(&gic->lock, flags);
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
if (queue == eq) {
@@ -542,7 +711,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
- dev_err(dev, "test_eq failed: %d\n", err);
+ if (mana_need_log(gc, err))
+ dev_err(dev, "test_eq failed: %d\n", err);
goto out;
}
@@ -577,11 +747,11 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
if (flush_evenets) {
err = mana_gd_test_eq(gc, queue);
- if (err)
+ if (err && mana_need_log(gc, err))
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}
- mana_gd_deregiser_irq(queue);
+ mana_gd_deregister_irq(queue);
if (queue->eq.disable_needed)
mana_gd_disable_queue(queue);
@@ -723,8 +893,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
return -EPROTO;
}
@@ -1024,8 +1195,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
if (!err)
err = -EPROTO;
}
@@ -1288,7 +1460,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
-static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+/*
+ * Spread on CPUs with the following heuristics:
+ *
+ * 1. No more than one IRQ per CPU, if possible;
+ * 2. NUMA locality is the second priority;
+ * 3. Sibling dislocality is the last priority.
+ *
+ * Let's consider this topology:
+ *
+ * Node 0 1
+ * Core 0 1 2 3
+ * CPU 0 1 2 3 4 5 6 7
+ *
+ * The most performant IRQ distribution based on the above topology
+ * and heuristics may look like this:
+ *
+ * IRQ Nodes Cores CPUs
+ * 0 1 0 0-1
+ * 1 1 1 2-3
+ * 2 1 0 0-1
+ * 3 1 1 2-3
+ * 4 2 2 4-5
+ * 5 2 3 6-7
+ * 6 2 2 4-5
+ * 7 2 3 6-7
+ *
+ * The heuristic is implemented as follows.
+ *
+ * The outer for_each() loop resets 'weight' to the actual number of
+ * CPUs in the hop. The inner for_each() loop then decrements it by the
+ * number of sibling groups (cores) while assigning the first set of
+ * IRQs to each group. IRQs 0 and 1 above are distributed this way.
+ *
+ * Because NUMA locality is more important, we then walk the same set
+ * of siblings again and assign the 2nd set of IRQs (2 and 3); this is
+ * what the middle while() loop does. We keep doing this until the
+ * number of IRQs assigned in this hop equals the number of CPUs in the
+ * hop (weight == 0), at which point we switch to the next hop and
+ * repeat.
+ */
+
+static int irq_setup(unsigned int *irqs, unsigned int len, int node,
+ bool skip_first_cpu)
{
const struct cpumask *next, *prev = cpu_none_mask;
cpumask_var_t cpus __free(free_cpumask_var);
@@ -1303,11 +1517,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
while (weight > 0) {
cpumask_andnot(cpus, next, prev);
for_each_cpu(cpu, cpus) {
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+
+ if (unlikely(skip_first_cpu)) {
+ skip_first_cpu = false;
+ continue;
+ }
+
if (len-- == 0)
goto done;
+
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
- cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
- --weight;
}
}
prev = next;
@@ -1317,47 +1538,108 @@ done:
return 0;
}
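
As a rough illustration of the heuristic described in the comment above, the following user-space sketch (not kernel code; the two-node, two-way-SMT topology and the skip_first_cpu handling are assumptions modelled on the example in that comment) reproduces the IRQ-to-sibling-pair assignment:

/* irq_spread_sim.c - illustrative only; build with: gcc -std=c11 irq_spread_sim.c */
#include <stdio.h>
#include <stdbool.h>

#define HOPS           2        /* NUMA hops: local node first, then remote */
#define CORES_PER_HOP  2
#define CPUS_PER_CORE  2
#define CPUS_PER_HOP   (CORES_PER_HOP * CPUS_PER_CORE)

/* First CPU of each sibling pair, grouped by hop (matches the comment's topology) */
static const int hop_cores[HOPS][CORES_PER_HOP] = {
        { 0, 2 },       /* cores 0-1 -> CPUs 0-3 */
        { 4, 6 },       /* cores 2-3 -> CPUs 4-7 */
};

static void spread(int nirqs, bool skip_first_cpu)
{
        int irq = 0;

        for (int hop = 0; hop < HOPS; hop++) {
                int weight = CPUS_PER_HOP;      /* CPUs in this hop */

                while (weight > 0) {
                        /* one pass: visit the first CPU of every core in the hop */
                        for (int c = 0; c < CORES_PER_HOP; c++) {
                                --weight;

                                if (skip_first_cpu) {
                                        /* this pair is already affinitized to the HWC IRQ */
                                        skip_first_cpu = false;
                                        continue;
                                }

                                if (irq == nirqs)
                                        return;

                                printf("IRQ %d -> CPUs %d-%d\n", irq++,
                                       hop_cores[hop][c], hop_cores[hop][c] + 1);
                        }
                }
        }
}

int main(void)
{
        spread(8, false);       /* reproduces the table in the comment above */
        return 0;
}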
-static int mana_gd_setup_irqs(struct pci_dev *pdev)
+static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
- unsigned int max_irqs, cpu;
- int start_irq_index = 1;
- int nvec, *irqs, irq;
- int err, i = 0, j;
+ bool skip_first_cpu = false;
+ int *irqs, irq, err, i;
- cpus_read_lock();
- max_queues_per_port = num_online_cpus();
- if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
- max_queues_per_port = MANA_MAX_NUM_QUEUES;
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ /*
+	 * While processing the PCI IRQ vectors, we start at index 1, since
+	 * the vector at index 0 has already been set up for the HWC.
+	 * The irqs[] array, however, is populated starting at index 0, as it
+	 * is passed to irq_setup() later.
+ */
+ for (i = 1; i <= nvec; i++) {
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
- /* Need 1 interrupt for the Hardware communication Channel (HWC) */
- max_irqs = max_queues_per_port + 1;
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
- nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0) {
- cpus_read_unlock();
- return nvec;
+		/* one PCI vector has already been allocated for the HWC */
+ irqs[i - 1] = pci_irq_vector(pdev, i);
+ if (irqs[i - 1] < 0) {
+ err = irqs[i - 1];
+ goto free_current_gic;
+ }
+
+ err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- if (nvec <= num_online_cpus())
- start_irq_index = 0;
- irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
- if (!irqs) {
- err = -ENOMEM;
- goto free_irq_vector;
+ /*
+	 * When calling irq_setup() for dynamically added IRQs: if the number
+	 * of online CPUs is greater than or equal to the number of allocated
+	 * MSI-X vectors, skip the first CPU sibling group, since it is already
+	 * affinitized to the HWC IRQ.
+ */
+ cpus_read_lock();
+ if (gc->num_msix_usable <= num_online_cpus())
+ skip_first_cpu = true;
+
+ err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
+ if (err) {
+ cpus_read_unlock();
+ goto free_irq;
}
- gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
- GFP_KERNEL);
- if (!gc->irq_contexts) {
- err = -ENOMEM;
- goto free_irq_array;
+ cpus_read_unlock();
+ kfree(irqs);
+ return 0;
+
+free_current_gic:
+ kfree(gic);
+free_irq:
+ for (i -= 1; i > 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
+
+ irq_update_affinity_hint(irq, NULL);
+ free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
+ kfree(irqs);
+ return err;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int *irqs, *start_irqs, irq;
+ unsigned int cpu;
+ int err, i;
+
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ start_irqs = irqs;
for (i = 0; i < nvec; i++) {
- gic = &gc->irq_contexts[i];
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
gic->handler = mana_gd_process_eq_events;
INIT_LIST_HEAD(&gic->eq_list);
spin_lock_init(&gic->lock);
@@ -1369,69 +1651,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
i - 1, pci_name(pdev));
- irq = pci_irq_vector(pdev, i);
- if (irq < 0) {
- err = irq;
- goto free_irq;
+ irqs[i] = pci_irq_vector(pdev, i);
+ if (irqs[i] < 0) {
+ err = irqs[i];
+ goto free_current_gic;
}
- if (!i) {
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- /* If number of IRQ is one extra than number of online CPUs,
- * then we need to assign IRQ0 (hwc irq) and IRQ1 to
- * same CPU.
- * Else we will use different CPUs for IRQ0 and IRQ1.
- * Also we are using cpumask_local_spread instead of
- * cpumask_first for the node, because the node can be
- * mem only.
- */
- if (start_irq_index) {
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
- } else {
- irqs[start_irq_index] = irq;
- }
- } else {
- irqs[i - start_irq_index] = irq;
- err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
- gic->name, gic);
- if (err)
- goto free_irq;
- }
+ err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
- if (err)
+	/* If the number of IRQs is one more than the number of online CPUs,
+	 * assign IRQ0 (the HWC IRQ) and IRQ1 to the same CPU.
+	 * Otherwise, use different CPUs for IRQ0 and IRQ1.
+	 * Also, use cpumask_local_spread() instead of cpumask_first()
+	 * for the node, because the node can be memory-only.
+ */
+ cpus_read_lock();
+ if (nvec > num_online_cpus()) {
+ cpu = cpumask_local_spread(0, gc->numa_node);
+ irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
+ irqs++;
+ nvec -= 1;
+ }
+
+ err = irq_setup(irqs, nvec, gc->numa_node, false);
+ if (err) {
+ cpus_read_unlock();
goto free_irq;
+ }
- gc->max_num_msix = nvec;
- gc->num_msix_usable = nvec;
cpus_read_unlock();
- kfree(irqs);
+ kfree(start_irqs);
return 0;
+free_current_gic:
+ kfree(gic);
free_irq:
- for (j = i - 1; j >= 0; j--) {
- irq = pci_irq_vector(pdev, j);
- gic = &gc->irq_contexts[j];
+ for (i -= 1; i >= 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
-free_irq_array:
- kfree(irqs);
-free_irq_vector:
- cpus_read_unlock();
- pci_free_irq_vectors(pdev);
+ kfree(start_irqs);
return err;
}
+static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_irqs, min_irqs;
+ int nvec, err;
+
+ if (pci_msix_can_alloc_dyn(pdev)) {
+ max_irqs = 1;
+ min_irqs = 1;
+ } else {
+ /* Need 1 interrupt for HWC */
+ max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
+ min_irqs = 2;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ err = mana_gd_setup_irqs(pdev, nvec);
+ if (err) {
+ pci_free_irq_vectors(pdev);
+ return err;
+ }
+
+ gc->num_msix_usable = nvec;
+ gc->max_num_msix = nvec;
+
+ return 0;
+}
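
As a quick sanity check on the vector counts requested above, a minimal user-space sketch (MANA_MAX_NUM_QUEUES is assumed here to be 64, matching the driver header at the time of writing; the CPU counts are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define MANA_MAX_NUM_QUEUES 64  /* assumed value from the driver headers */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static void hwc_irq_range(unsigned int online_cpus, bool can_alloc_dyn)
{
        unsigned int max_irqs, min_irqs;

        if (can_alloc_dyn) {
                /* only the HWC vector up front; the rest are added later */
                max_irqs = 1;
                min_irqs = 1;
        } else {
                /* all queue vectors plus one for the HWC */
                max_irqs = min_u(online_cpus, MANA_MAX_NUM_QUEUES) + 1;
                min_irqs = 2;
        }

        printf("cpus=%-3u dyn=%d -> request %u..%u MSI-X vectors\n",
               online_cpus, can_alloc_dyn, min_irqs, max_irqs);
}

int main(void)
{
        hwc_irq_range(8, true);         /* 1..1  */
        hwc_irq_range(8, false);        /* 2..9  */
        hwc_irq_range(128, false);      /* 2..65 */
        return 0;
}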
+
+static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct msi_map irq_map;
+ int max_irqs, i, err;
+
+ if (!pci_msix_can_alloc_dyn(pdev))
+		/* the remaining IRQs were already allocated along with the HWC IRQ */
+ return 0;
+
+	/* allocate only the remaining IRQs */
+ max_irqs = gc->num_msix_usable - 1;
+
+ for (i = 1; i <= max_irqs; i++) {
+ irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
+ if (!irq_map.virq) {
+ err = irq_map.index;
+			/* the caller will clean up all allocated
+			 * IRQs after the HWC is destroyed
+ */
+ return err;
+ }
+ }
+
+ err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
+ if (err)
+ return err;
+
+ gc->max_num_msix = gc->max_num_msix + max_irqs;
+
+ return 0;
+}
+
static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1446,19 +1787,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (irq < 0)
continue;
- gic = &gc->irq_contexts[i];
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
pci_free_irq_vectors(pdev);
gc->max_num_msix = 0;
gc->num_msix_usable = 0;
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
}
static int mana_gd_setup(struct pci_dev *pdev)
@@ -1473,9 +1816,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (!gc->service_wq)
return -ENOMEM;
- err = mana_gd_setup_irqs(pdev);
+ err = mana_gd_setup_hwc_irqs(pdev);
if (err) {
- dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
+ dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
+ err);
goto free_workqueue;
}
@@ -1491,6 +1835,12 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ err = mana_gd_setup_remaining_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err);
+ goto destroy_hwc;
+ }
+
err = mana_gd_detect_devices(pdev);
if (err)
goto destroy_hwc;
@@ -1571,6 +1921,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
+ xa_init(&gc->irq_contexts);
if (gc->is_pf)
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
@@ -1605,6 +1956,7 @@ unmap_bar:
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1630,6 +1982,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
@@ -1641,7 +1995,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
}
/* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1657,7 +2011,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
* fail -- if this happens, it's safer to just report an error than try to undo
* what has been done.
*/
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
int err;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a8c4d8db75a5..ef072e24c46d 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, Microsoft Corporation. */
#include <net/mana/gdma.h>
+#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>
@@ -879,7 +880,9 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
- dev_err(hwc->dev, "HWC: Request timed out!\n");
+ if (hwc->hwc_timeout != 0)
+ dev_err(hwc->dev, "HWC: Request timed out!\n");
+
err = -ETIMEDOUT;
goto out;
}
@@ -890,8 +893,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
- dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
- ctx->status_code);
+ if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
err = -EPROTO;
goto out;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ccd2885c939e..550843e2164b 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -10,6 +10,7 @@
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
@@ -47,6 +48,15 @@ static const struct file_operations mana_dbg_q_fops = {
.read = mana_dbg_q_read,
};
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+ if (apc && apc->ac && apc->ac->gdma_dev &&
+ apc->ac->gdma_dev->gdma_context)
+ return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+ else
+ return true;
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
@@ -251,10 +261,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct netdev_queue *net_txq;
struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
+ int err, len, num_gso_seg;
unsigned int csum_type;
struct mana_txq *txq;
struct mana_cq *cq;
- int err, len;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -407,6 +417,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_queue_tail(&txq->pending_skbs, skb);
len = skb->len;
+ num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
net_txq = netdev_get_tx_queue(ndev, txq_idx);
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -431,10 +442,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
skb = NULL;
+	/* Populate the packet and byte counters based on post-GSO segment
+	 * calculations
+ */
tx_stats = &txq->stats;
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += len;
+ tx_stats->packets += num_gso_seg;
+ tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
tx_busy:
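
For a concrete feel of the post-GSO accounting above, a small user-space sketch with made-up numbers (gso_hs stands in for the GSO header size computed earlier in mana_start_xmit(); 54 bytes is just an illustrative Ethernet + IPv4 + TCP value):

#include <stdio.h>

int main(void)
{
        /* Illustrative TSO skb: 3 MSS-sized segments behind one header. */
        unsigned int gso_hs      = 54;          /* Ethernet + IPv4 + TCP headers */
        unsigned int mss         = 1448;
        unsigned int num_gso_seg = 3;
        unsigned int len         = gso_hs + num_gso_seg * mss; /* skb->len = 4398 */

        /* Wire-level accounting: every segment carries its own header, so the
         * (num_gso_seg - 1) extra header copies are added to the byte count.
         */
        unsigned long long packets = num_gso_seg;
        unsigned long long bytes   = len + (num_gso_seg - 1ULL) * gso_hs;

        printf("skb len=%u -> packets=%llu bytes=%llu (3 * 1502 = 4506)\n",
               len, packets, bytes);
        return 0;
}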
@@ -719,6 +733,78 @@ out:
return err;
}
+static int mana_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ u32 old_speed, rate;
+ int err;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
+ return -EINVAL;
+ }
+
+ if (apc->handle.id && shaper->handle.id != apc->handle.id) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
+ return -EOPNOTSUPP;
+ }
+
+ if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
+ return -EINVAL;
+ }
+
+ rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
+ rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
+
+ /* Get current speed */
+ err = mana_query_link_cfg(apc);
+ old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
+
+ if (!err) {
+ err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
+ apc->speed = (err) ? old_speed : rate;
+ apc->handle = (err) ? apc->handle : shaper->handle;
+ }
+
+ return err;
+}
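
The two div_u64() calls above only turn the bw_max handed in by the net_shaper core (bits per second) into whole Mbps for the device, after the multiple-of-100-Mbps check; a tiny sketch with an assumed 2 Gbit/s request:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        /* Assumed request: a 2 Gbit/s bw_max from the net_shaper core. */
        uint64_t bw_max = 2000000000ULL;        /* bits per second */

        if (!bw_max || (bw_max % 100000000ULL)) {
                printf("rejected: not a non-zero multiple of 100 Mbps\n");
                return 1;
        }

        uint64_t rate = bw_max / 1000;          /* bps  -> Kbps */
        rate /= 1000;                           /* Kbps -> Mbps */

        printf("bw_max=%" PRIu64 " bps -> clamp to %" PRIu64 " Mbps\n",
               bw_max, rate);
        return 0;
}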
+
+static int mana_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ int err;
+
+ err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
+
+ if (!err) {
+ /* Reset mana port context parameters */
+ apc->handle.id = 0;
+ apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
+ apc->speed = 0;
+ }
+
+ return err;
+}
+
+static void mana_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops mana_shaper_ops = {
+ .set = mana_shaper_set,
+ .delete = mana_shaper_del,
+ .capabilities = mana_shaper_cap,
+};
+
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
@@ -729,6 +815,7 @@ static const struct net_device_ops mana_devops = {
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .net_shaper_ops = &mana_shaper_ops,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -774,8 +861,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
out_buf);
if (err || resp->status) {
- dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
- err, resp->status);
+ if (err == -EOPNOTSUPP)
+ return err;
+
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+ mana_need_log(gc, err))
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
return err ? err : -EPROTO;
}
@@ -850,8 +942,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+
return;
}
@@ -906,8 +1000,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+
return;
}
@@ -1137,7 +1233,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
goto out;
}
@@ -1161,6 +1259,95 @@ out:
return err;
}
+int mana_query_link_cfg(struct mana_port_context *apc)
+{
+ struct net_device *ndev = apc->ndev;
+ struct mana_query_link_config_resp resp = {};
+ struct mana_query_link_config_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport = apc->port_handle;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to query link config: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured) {
+ err = -EINVAL;
+ return err;
+ }
+ apc->speed = resp.link_speed_mbps;
+ apc->max_speed = resp.qos_speed_mbps;
+ return 0;
+}
+
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping)
+{
+ struct mana_set_bw_clamp_resp resp = {};
+ struct mana_set_bw_clamp_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.link_speed_mbps = speed;
+ req.enable_clamping = enable_clamping;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d",
+ speed, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured)
+ netdev_info(ndev, "QoS is unconfigured\n");
+
+ return 0;
+}
+
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
@@ -1232,7 +1419,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
return;
}
@@ -1911,8 +2100,10 @@ static void mana_destroy_txq(struct mana_port_context *apc)
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2064,8 +2255,11 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
- netif_napi_add_tx(net, &cq->napi, mana_poll);
- napi_enable(&cq->napi);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+ netdev_lock_ops_to_full(net);
+ netif_napi_add_locked(net, &cq->napi, mana_poll);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(net);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2101,9 +2295,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
-
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2354,14 +2549,18 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+ netdev_lock_ops_to_full(ndev);
+ netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
+ netdev_unlock_full_to_ops(ndev);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- napi_enable(&cq->napi);
+ netdev_lock_ops_to_full(ndev);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(ndev);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
@@ -2611,6 +2810,88 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+ struct mana_query_phy_stat_resp resp = {};
+ struct mana_query_phy_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(req), sizeof(resp));
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev,
+ "Failed to query PHY stats: %d, resp:0x%x\n",
+ err, resp.hdr.status);
+ return;
+ }
+
+ /* Aggregate drop counters */
+ apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+ apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+ apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+ apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+ apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+ apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+ apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+ apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+ apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+ apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+ apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+ apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+ apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+ apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+ apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+ apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+ apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+ /* Per TC byte Counters */
+ apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+ apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+ apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+ apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+ apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+ apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+ apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+ apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+ apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+ apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+ apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+ apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+ apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+ apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+ apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+ apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+ apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+ apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+ apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+ apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+ apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+ apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+ apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+ apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+ apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+ apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+ apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+ apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+ apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+ apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+ apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
+}
+
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
@@ -2805,11 +3086,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
apc->rss_state = TRI_STATE_FALSE;
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
- if (err) {
+ if (err && mana_en_need_log(apc, err))
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
- return err;
- }
+	/* Even in the error case, we still need to clean up the vPort */
mana_destroy_vport(apc);
return 0;
@@ -2918,6 +3198,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
goto free_indir;
}
+ debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
+
return 0;
free_indir:
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c419626073f5..a1afa75a9463 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -7,10 +7,12 @@
#include <net/mana/mana.h>
-static const struct {
+struct mana_stats_desc {
char name[ETH_GSTRING_LEN];
u16 offset;
-} mana_eth_stats[] = {
+};
+
+static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
@@ -75,6 +77,59 @@ static const struct {
rx_cqe_unknown_type)},
};
+static const struct mana_stats_desc mana_phy_stats[] = {
+ { "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
+ { "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
+ { "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
+ { "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
+ { "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
+ { "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
+ { "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
+ { "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
+ { "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
+ { "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
+ { "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
+ { "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
+ { "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
+ { "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
+ { "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
+ { "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
+ { "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
+ { "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
+ { "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
+ { "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
+ { "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
+ { "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
+ { "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
+ { "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
+ { "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
+ { "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
+ { "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
+ { "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
+ { "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
+ { "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
+ { "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
+ { "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
+ { "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
+ { "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
+ { "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
+ { "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
+ { "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
+ { "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
+ { "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
+ { "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
+ { "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
+ { "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
+ { "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
+ { "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
+ { "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
+ { "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
+ { "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
+ { "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
+ { "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
+ { "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
+};
+
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
struct mana_port_context *apc = netdev_priv(ndev);
@@ -83,8 +138,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues *
- (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+ return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+ num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -99,6 +154,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
ethtool_puts(&data, mana_eth_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
+ ethtool_puts(&data, mana_phy_stats[i].name);
+
for (i = 0; i < num_queues; i++) {
ethtool_sprintf(&data, "rx_%d_packets", i);
ethtool_sprintf(&data, "rx_%d_bytes", i);
@@ -128,6 +186,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
unsigned int start;
@@ -151,9 +210,18 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
/* we call mana function to update stats from GDMA */
mana_query_gf_stats(apc);
+	/* Call this MANA function to fetch the PHY stats from GDMA; they
+	 * include the aggregate tx/rx drop counters and the per-TC (Traffic
+	 * Channel) tx/rx and pause counters.
+ */
+ mana_query_phy_stats(apc);
+
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
+ data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+
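
The loop above pulls each u64 counter out of apc->phy_stats by byte offset using the mana_phy_stats[] descriptor table; the same offsetof()-driven pattern, reduced to a self-contained user-space sketch with a made-up two-counter struct:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_phy_stats {
        uint64_t rx_pkt_drop;
        uint64_t tx_pkt_drop;
};

struct demo_stats_desc {
        const char *name;
        size_t offset;
};

static const struct demo_stats_desc demo_descs[] = {
        { "hc_rx_pkt_drop_phy", offsetof(struct demo_phy_stats, rx_pkt_drop) },
        { "hc_tx_pkt_drop_phy", offsetof(struct demo_phy_stats, tx_pkt_drop) },
};

int main(void)
{
        struct demo_phy_stats stats = { .rx_pkt_drop = 7, .tx_pkt_drop = 3 };
        const char *base = (const char *)&stats;

        /* Same trick as the ethtool path: a name table plus offsetof() lookups. */
        for (size_t i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++)
                printf("%-22s %llu\n", demo_descs[i].name,
                       (unsigned long long)*(const uint64_t *)
                       (base + demo_descs[i].offset));
        return 0;
}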
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -427,6 +495,12 @@ out:
static int mana_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_query_link_cfg(apc);
+ cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed;
+
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_OTHER;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 27443e346f9f..5026b0263d43 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4707,7 +4707,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
/*
* rx_traffic_int reg is an R1 register, writing all 1's
* will ensure that the actual interrupt causing bit
- * get's cleared and hence a read can be avoided.
+ * gets cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_RXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
@@ -4721,7 +4721,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
/*
* tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
+ * will ensure that the actual interrupt causing bit gets
* cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_TXTRAFFIC)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 932f59d70f41..132626a3f9f7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2394,8 +2394,7 @@ static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
.sync_table = nfp_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{
.n_entries = NFP_NET_N_VXLAN_PORTS,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index d8b735ccf899..d843d1e19715 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -77,7 +77,7 @@ DEFINE_SHOW_ATTRIBUTE(nfp_rx_q);
static int nfp_tx_q_show(struct seq_file *file, void *data);
DEFINE_SHOW_ATTRIBUTE(nfp_tx_q);
-static int nfp_tx_q_show(struct seq_file *file, void *data)
+static int __nfp_tx_q_show(struct seq_file *file, void *data, bool is_xdp)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
@@ -86,10 +86,10 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
rtnl_lock();
- if (debugfs_real_fops(file->file) == &nfp_tx_q_fops)
- tx_ring = r_vec->tx_ring;
- else
+ if (is_xdp)
tx_ring = r_vec->xdp_ring;
+ else
+ tx_ring = r_vec->tx_ring;
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
@@ -115,9 +115,14 @@ out:
return 0;
}
+static int nfp_tx_q_show(struct seq_file *file, void *data)
+{
+ return __nfp_tx_q_show(file, data, false);
+}
+
static int nfp_xdp_q_show(struct seq_file *file, void *data)
{
- return nfp_tx_q_show(file, data);
+ return __nfp_tx_q_show(file, data, true);
}
DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index fbca8d0efd85..a36215195923 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1303,9 +1303,10 @@ static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
return xlate_ethtool_to_nfp[flow_type];
}
-static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
- struct ethtool_rxnfc *cmd)
+static int nfp_net_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 nfp_rss_flag;
cmd->data = 0;
@@ -1451,16 +1452,16 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXCLSRLALL:
cmd->data = NFP_FS_MAX_ENTRY;
return nfp_net_get_fs_loc(nn, rule_locs);
- case ETHTOOL_GRXFH:
- return nfp_net_get_rss_hash_opts(nn, cmd);
default:
return -EOPNOTSUPP;
}
}
-static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
- struct ethtool_rxnfc *nfc)
+static int nfp_net_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 new_rss_cfg = nn->rss_cfg;
u32 nfp_rss_flag;
int err;
@@ -1763,8 +1764,6 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return nfp_net_set_rss_hash_opt(nn, cmd);
case ETHTOOL_SRXCLSRLINS:
return nfp_net_fs_add(nn, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -2506,6 +2505,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
.set_rxfh = nfp_net_set_rxfh,
+ .get_rxfh_fields = nfp_net_get_rxfh_fields,
+ .set_rxfh_fields = nfp_net_set_rxfh_fields,
.get_regs_len = nfp_net_get_regs_len,
.get_regs = nfp_net_get_regs,
.set_dump = nfp_app_set_dump,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 4c377bdc62c8..136bfa3516d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -409,6 +409,7 @@ static void ionic_remove(struct pci_dev *pdev)
timer_shutdown_sync(&ionic->watchdog_timer);
if (ionic->lif) {
+ cancel_work_sync(&ionic->lif->deferred.work);
/* prevent adminq cmds if already known as down */
if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 18b9c8a810ae..093c5358b6e8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -424,9 +424,9 @@ do_check_time:
if (fw_hb_ready != idev->fw_hb_ready) {
idev->fw_hb_ready = fw_hb_ready;
if (!fw_hb_ready)
- dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat stalled at %u\n", fw_hb);
else
- dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat restored at %u\n", fw_hb);
}
if (!fw_hb_ready)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index f1ddbe9994a3..9886cd66ce68 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1074,7 +1074,7 @@ struct ionic_rxq_sg_desc {
* first IPv4 header. If the receive packet
* contains both a tunnel IPv4 header and a
* transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
+ * checksum for both IPv4 headers.
*
* IONIC_RXQ_COMP_CSUM_F_IP_BAD:
* The IPv4 checksum calculated by the device did
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7707a9e53c43..48cb5d30b5f6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3526,10 +3526,6 @@ void ionic_lif_free(struct ionic_lif *lif)
lif->info = NULL;
lif->info_pa = 0;
- /* unmap doorbell page */
- ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
- lif->kern_dbpage = NULL;
-
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
@@ -3555,6 +3551,9 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+ lif->kern_dbpage = NULL;
+
ionic_lif_reset(lif);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 7505efdff8e9..9f5c81d44f99 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -290,7 +290,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
return (u64)tick_low | ((u64)tick_high << 32);
}
-static u64 ionic_cc_read(const struct cyclecounter *cc)
+static u64 ionic_cc_read(struct cyclecounter *cc)
{
struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);
struct ionic *ionic = phc->lif->ionic;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2ac59564ded1..d10b58ebf603 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
len, DMA_TO_DEVICE);
} else /* XDP_REDIRECT */ {
dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
}
@@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
} else {
dma_addr = ionic_tx_map_frag(q, frag, 0,
skb_frag_size(frag));
- if (dma_mapping_error(q->dev, dma_addr)) {
+ if (dma_addr == DMA_MAPPING_ERROR) {
ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO;
}
@@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
@@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
goto dma_fail;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3383ee1dad14..e8ff661fa4a5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2946,15 +2946,15 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read_new = netxen_sysfs_read_crb,
- .write_new = netxen_sysfs_write_crb,
+ .read = netxen_sysfs_read_crb,
+ .write = netxen_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read_new = netxen_sysfs_read_mem,
- .write_new = netxen_sysfs_write_mem,
+ .read = netxen_sysfs_read_mem,
+ .write = netxen_sysfs_write_mem,
};
static ssize_t
@@ -3082,7 +3082,7 @@ out:
static const struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = 0644 },
.size = sizeof(struct netxen_dimm_cfg),
- .read_new = netxen_sysfs_read_dimm,
+ .read = netxen_sysfs_read_dimm,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9659ce5b0712..f3d2b2b3bad5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2216,7 +2216,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
}
/* CID map / ILT shadow table / T2
- * The talbes sizes are determined by the computations above
+ * The table sizes are determined by the computations above
*/
rc = qed_cxt_tables_alloc(p_hwfn);
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index f55eed092f25..7d78f072b0a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -242,7 +242,7 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
}
/* Returns size of the data buffer or, -1 in case TLV data is not available. */
-static int
+static noinline_for_stack int
qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_generic *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -304,7 +304,7 @@ qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_eth *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -438,7 +438,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
return QED_MFW_TLV_TIME_SIZE;
}
-static int
+static noinline_for_stack int
qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_fcoe *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -1073,7 +1073,7 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_iscsi *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 295ce435a1a4..4df8a97b717e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -307,7 +307,7 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
} else if (ppb == 1) {
/* This is a special case as its the only value which wouldn't
* fit in a s64 variable. In order to prevent castings simple
- * handle it seperately.
+ * handle it separately.
*/
best_val = 4;
best_period = 0xee6b27f;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e50e1df0a433..23982704273c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1168,8 +1168,11 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
-static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct qede_dev *edev = netdev_priv(dev);
+
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
@@ -1206,9 +1209,6 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = QEDE_RSS_COUNT(edev);
break;
- case ETHTOOL_GRXFH:
- rc = qede_get_rss_flags(edev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
@@ -1227,14 +1227,17 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return rc;
}
-static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
u8 set_caps = 0, clr_caps = 0;
int rc = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -1337,9 +1340,6 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
int rc;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = qede_set_rss_flags(edev, info);
- break;
case ETHTOOL_SRXCLSRLINS:
rc = qede_add_cls_rule(edev, info);
break;
@@ -2293,6 +2293,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_ts_info = qede_get_ts_info,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
@@ -2335,6 +2337,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.get_per_queue_coalesce = qede_get_per_coalesce,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 985026dd816f..7e341e026489 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -987,20 +987,17 @@ static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
}, qede_udp_tunnels_vxlan = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
}, qede_udp_tunnels_geneve = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 9d6399a5c780..a38f1e72c62b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -181,7 +181,7 @@ static void qede_ptp_task(struct work_struct *work)
}
/* Read the PHC. This API is invoked with ptp_lock held. */
-static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+static u64 qede_ptp_read_cc(struct cyclecounter *cc)
{
struct qede_dev *edev;
struct qede_ptp *ptp;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index aee4e63b4b82..fca94a69c777 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1501,7 +1501,7 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
"Remote error detected. Calling ql_port_start()\n");
/*
* ql_port_start() is shared code and needs
- * to lock the PHY on it's own.
+ * to lock the PHY on its own.
*/
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
if (ql_port_start(qdev)) /* Restart port */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b733374b4dc5..6145252d8ff8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2051,7 +2051,7 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}
-/* POST FW related definations*/
+/* POST FW related definitions*/
#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
#define QLC_83XX_POST_MODE_REG 0x41602018
#define QLC_83XX_POST_FAST_MODE 0
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index eb69121df726..53cdd36c4123 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -486,7 +486,6 @@ static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
.sync_table = qlcnic_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c0f20464fd1e..5296d9a6ee83 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1195,63 +1195,63 @@ static const struct device_attribute dev_attr_beacon = {
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_read_crb,
- .write_new = qlcnic_sysfs_write_crb,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_read_mem,
- .write_new = qlcnic_sysfs_write_mem,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
};
static const struct bin_attribute bin_attr_npar_config = {
.attr = { .name = "npar_config", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_read_npar_config,
- .write_new = qlcnic_sysfs_write_npar_config,
+ .read = qlcnic_sysfs_read_npar_config,
+ .write = qlcnic_sysfs_write_npar_config,
};
static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_read_pci_config,
+ .read = qlcnic_sysfs_read_pci_config,
};
static const struct bin_attribute bin_attr_port_stats = {
.attr = { .name = "port_stats", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_get_port_stats,
- .write_new = qlcnic_sysfs_clear_port_stats,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
};
static const struct bin_attribute bin_attr_esw_stats = {
.attr = { .name = "esw_stats", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_get_esw_stats,
- .write_new = qlcnic_sysfs_clear_esw_stats,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
};
static const struct bin_attribute bin_attr_esw_config = {
.attr = { .name = "esw_config", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_read_esw_config,
- .write_new = qlcnic_sysfs_write_esw_config,
+ .read = qlcnic_sysfs_read_esw_config,
+ .write = qlcnic_sysfs_write_esw_config,
};
static const struct bin_attribute bin_attr_pm_config = {
.attr = { .name = "pm_config", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_sysfs_read_pm_config,
- .write_new = qlcnic_sysfs_write_pm_config,
+ .read = qlcnic_sysfs_read_pm_config,
+ .write = qlcnic_sysfs_write_pm_config,
};
static const struct bin_attribute bin_attr_flash = {
.attr = { .name = "flash", .mode = 0644 },
.size = 0,
- .read_new = qlcnic_83xx_sysfs_flash_read_handler,
- .write_new = qlcnic_83xx_sysfs_flash_write_handler,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
};
#ifdef CONFIG_QLCNIC_HWMON
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index a508ebc4b206..28b3a7071e58 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -419,7 +419,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- /* v2 SGMII has a per-lane digital digital, so parse it if it exists */
+ /* v2 SGMII has a per-lane digital, so parse it if it exists */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
if (res) {
phy->digital = ioremap(res->start, resource_size(res));
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 43170500d566..9c601f271c02 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -216,8 +216,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
{ PCI_VDEVICE(REALTEK, 0x8169) },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
{ PCI_VDEVICE(DLINK, 0x4300) },
{ PCI_VDEVICE(DLINK, 0x4302) },
{ PCI_VDEVICE(AT, 0xc107) },
@@ -5262,7 +5260,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_61)
phy_disable_eee_mode(tp->phydev,
ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
- phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 498cfe4d0cac..20decdeb9fdb 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -288,6 +288,7 @@ struct rtase_ring {
u32 cur_idx;
u32 dirty_idx;
u16 index;
+ u8 type;
struct sk_buff *skbuff[RTASE_NUM_DESC];
void *data_buf[RTASE_NUM_DESC];
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 4d37217e9a14..ef13109c49cf 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -326,6 +326,7 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_TX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++) {
@@ -345,6 +346,9 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->ivec = &tp->int_vector[0];
list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
}
+
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
}
static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
@@ -590,6 +594,7 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_RX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++)
@@ -597,6 +602,8 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->ring_handler = rx_handler;
ring->ivec = &tp->int_vector[idx];
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
}
@@ -1161,8 +1168,12 @@ static void rtase_down(struct net_device *dev)
ivec = &tp->int_vector[i];
napi_disable(&ivec->napi);
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
netif_tx_disable(dev);
@@ -1518,8 +1529,12 @@ static void rtase_sw_reset(struct net_device *dev)
for (i = 0; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
ret = rtase_init_ring(dev);
@@ -1871,6 +1886,18 @@ static void rtase_init_netdev_ops(struct net_device *dev)
dev->ethtool_ops = &rtase_ethtool_ops;
}
+static void rtase_init_napi(struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ netif_napi_add_config(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll, i);
+ netif_napi_set_irq(&tp->int_vector[i].napi,
+ tp->int_vector[i].irq);
+ }
+}
+
static void rtase_reset_interrupt(struct pci_dev *pdev,
const struct rtase_private *tp)
{
@@ -1956,9 +1983,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
- netif_napi_add(tp->dev, &tp->int_vector[0].napi,
- tp->int_vector[0].poll);
-
/* interrupt vector 1 ~ 3 */
for (i = 1; i < tp->int_nums; i++) {
tp->int_vector[i].tp = tp;
@@ -1972,9 +1996,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[i].name, 0x0,
sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
-
- netif_napi_add(tp->dev, &tp->int_vector[i].napi,
- tp->int_vector[i].poll);
}
}
@@ -2206,6 +2227,8 @@ static int rtase_init_one(struct pci_dev *pdev,
goto err_out_del_napi;
}
+ rtase_init_napi(tp);
+
rtase_init_netdev_ops(dev);
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c9f4976a3527..94b6fb94f8f1 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -3075,7 +3075,7 @@ static int ravb_probe(struct platform_device *pdev)
if (info->coalesce_irqs) {
netdev_sw_irq_coalesce_default_on(ndev);
if (num_present_cpus() == 1)
- dev_set_threaded(ndev, true);
+ netif_threaded_enable(ndev);
}
/* Network device register */
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 6b3f7fca8d15..05c4b6c8c9c3 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1259,7 +1259,12 @@ static int rtsn_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->pdev = pdev;
priv->ndev = ndev;
+
priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+ if (!priv->ptp_priv) {
+ ret = -ENOMEM;
+ goto error_free;
+ }
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 4a439b34114d..ad73733644f9 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -308,8 +308,8 @@ static int sxgbe_set_coalesce(struct net_device *dev,
return 0;
}
-static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
+static int sxgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
cmd->data = 0;
@@ -344,26 +344,11 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int sxgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = sxgbe_get_rss_hash_opts(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
-{
u32 reg_val = 0;
/* RSS does not support anything other than hashing
@@ -421,22 +406,6 @@ static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = sxgbe_set_rss_hash_opt(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void sxgbe_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
@@ -489,8 +458,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_channels = sxgbe_get_channels,
.get_coalesce = sxgbe_get_coalesce,
.set_coalesce = sxgbe_set_coalesce,
- .get_rxnfc = sxgbe_get_rxnfc,
- .set_rxnfc = sxgbe_set_rxnfc,
+ .get_rxfh_fields = sxgbe_get_rxfh_fields,
+ .set_rxfh_fields = sxgbe_set_rxfh_fields,
.get_regs = sxgbe_get_regs,
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 47349c148c0c..fcec81f862ec 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3985,7 +3985,6 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
.set_port = efx_ef10_udp_tnl_set_port,
.unset_port = efx_ef10_udp_tnl_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{
.n_entries = 16,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 83d715544f7f..23c6a7df78d0 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -262,11 +262,13 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_fields = true,
.rxfh_per_ctx_key = true,
.cap_rss_rxnfc_adds = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_ethtool_get_rxfh_fields,
.create_rxfh_context = efx_ethtool_create_rxfh_context,
.modify_rxfh_context = efx_ethtool_modify_rxfh_context,
.remove_rxfh_context = efx_ethtool_remove_rxfh_context,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 2d734496733f..fa303e171d98 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -800,6 +800,56 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *ctx;
+ __u64 data;
+ int rc = 0;
+
+ ctx = &efx->rss_context.priv;
+
+ if (info->rss_context) {
+ ctx = efx_find_rss_context_entry(efx, info->rss_context);
+ if (!ctx)
+ return -ENOENT;
+ }
+
+ data = 0;
+ if (!efx_rss_active(ctx)) /* No RSS */
+ goto out_setdata_unlock;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (ctx->rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata_unlock:
+ info->data = data;
+ return rc;
+}
+
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -812,55 +862,6 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- struct efx_rss_context_priv *ctx = &efx->rss_context.priv;
- __u64 data;
-
- mutex_lock(&net_dev->ethtool->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_find_rss_context_entry(efx, info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
-
- switch (info->flow_type & ~FLOW_RSS) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata_unlock:
- info->data = data;
-out_unlock:
- mutex_unlock(&net_dev->ethtool->rss_lock);
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index fc52e891637d..24db4fccbe78 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -49,6 +49,8 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 04766448a545..27d1cd6f24ca 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -944,6 +944,37 @@ static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
}
static int
+ef4_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ info->data = 0;
+ /* Falcon A0 and A1 had a 4-tuple hash for TCP and UDP, but it was
+ * broken so we do not enable it.
+ * Falcon B0 adds a Toeplitz hash, 4-tuple for TCP and 2-tuple for
+ * other IPv4, including UDP.
+ * See falcon_init_rx_cfg().
+ */
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
+ return 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -954,29 +985,6 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- unsigned min_revision = 0;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EF4_REV_FALCON_B0;
- break;
- default:
- break;
- }
- if (ef4_nic_rev(efx) < min_revision)
- info->data = 0;
- return 0;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = ef4_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1343,6 +1351,7 @@ const struct ethtool_ops ef4_ethtool_ops = {
.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
.get_rxfh = ef4_ethtool_get_rxfh,
.set_rxfh = ef4_ethtool_set_rxfh,
+ .get_rxfh_fields = ef4_ethtool_get_rxfh_fields,
.get_module_info = ef4_ethtool_get_module_info,
.get_module_eeprom = ef4_ethtool_get_module_eeprom,
.get_link_ksettings = ef4_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9cb339c461fb..b9866e389e6d 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -9190,7 +9190,7 @@
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
* handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
- * correspond to a sensor currently managed by the MC will be dropped from from
+ * correspond to a sensor currently managed by the MC will be dropped from
* the response. This may happen when a sensor table update is in progress, and
* effectively means the set of usable sensors is the intersection between the
* sets of sensors known to the driver and the MC. On Riverhead this command is
@@ -9236,7 +9236,7 @@
* broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
* entry will be set to BROKEN, and any value provided should be treated as
* erroneous. Any handles which do not correspond to a sensor currently managed
- * by the MC will be dropped from from the response. This may happen when a
+ * by the MC will be dropped from the response. This may happen when a
* sensor table update is in progress, and effectively means the set of usable
* sensors is the intersection between the sets of sensors known to the driver
* and the MC. On Riverhead this command is implemented as a wrapper for
@@ -22487,7 +22487,7 @@
* the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
- * values from the PCIE_INTERFACE enum for for the INTF field. It's only
+ * values from the PCIE_INTERFACE enum for the INTF field. It's only
* meaningful to use INTF=CALLER within a structure that's an argument to
* MC_CMD_DEVEL_GET_CLIENT_HANDLE.
*/
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 5c0f306fb019..b98c259f672d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -404,7 +404,6 @@ struct efx_rx_page_state {
* @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue()
* @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue()
* @xdp_rxq_info: XDP specific RX queue information.
- * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -443,7 +442,6 @@ struct efx_rx_queue {
unsigned long old_rx_packets;
unsigned long old_rx_bytes;
struct xdp_rxq_info xdp_rxq_info;
- bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index f4f75299dfa9..5306f4c44be4 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -269,8 +269,6 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
"Failure to initialise XDP queue information rc=%d\n",
rc);
efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
}
/* Set up RX descriptor ring */
@@ -302,10 +300,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_recycle_ring(rx_queue);
- if (rx_queue->xdp_rxq_info_valid)
+ if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info))
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index c5ad84db9613..994909789bfe 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -264,6 +264,7 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_siena_ethtool_get_rxfh_fields,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index eeee676fdca7..47cd16a113cf 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -801,6 +801,46 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ __u64 data;
+
+ data = 0;
+ if (!efx_rss_active(&efx->rss_context)) /* No RSS */
+ goto out_setdata;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (efx->rss_context.rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata:
+ info->data = data;
+ return 0;
+}
+
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -813,43 +853,6 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- __u64 data;
-
- data = 0;
- if (!efx_rss_active(&efx->rss_context)) /* No RSS */
- goto out_setdata;
-
- switch (info->flow_type) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (efx->rss_context.rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata:
- info->data = data;
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index d674bab0f65b..278d69e920d9 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -46,6 +46,8 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index 89ccd65c978b..562a038e38a7 100644
--- a/drivers/net/ethernet/sfc/siena/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -1708,7 +1708,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
+ "Reducing VF count from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
index a3cc8b7ec732..b81b0aa460d2 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -6704,16 +6704,16 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
@@ -7823,7 +7823,7 @@
* handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
@@ -7872,7 +7872,7 @@
* provided should be treated as erroneous.
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 2be3bad3c993..4cf556782133 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -384,7 +384,6 @@ struct efx_rx_page_state {
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @xdp_rxq_info: XDP specific RX queue information.
- * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -417,7 +416,6 @@ struct efx_rx_queue {
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
struct xdp_rxq_info xdp_rxq_info;
- bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 98d27174015d..4ae09505e417 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -268,8 +268,6 @@ void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
"Failure to initialise XDP queue information rc=%d\n",
rc);
efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
}
/* Set up RX descriptor ring */
@@ -299,10 +297,8 @@ void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_recycle_ring(rx_queue);
- if (rx_queue->xdp_rxq_info_valid)
+ if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info))
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
}
void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 87443f9dfd22..2258f854e5be 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -442,7 +442,7 @@ static void efx_tc_update_encap(struct efx_nic *efx,
rule = container_of(acts, struct efx_tc_flow_rule, acts);
if (rule->fallback)
fallback = rule->fallback;
- else /* fallback fallback: deliver to PF */
+ else /* fallback: deliver to PF */
fallback = &efx->tc->facts.pf;
rc = efx_mae_update_rule(efx, fallback->fw_id,
rule->fw_id);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 2e1106097965..6ca290f7c0df 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2350,7 +2350,7 @@ static void smsc911x_drv_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-/* standard register acces */
+/* standard register access */
static const struct smsc911x_ops standard_smsc911x_ops = {
.reg_read = __smsc911x_reg_read,
.reg_write = __smsc911x_reg_write,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ea5da5793362..cbffccb3b9af 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -396,17 +396,6 @@ enum request_irq_err {
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
-/* Physical Coding Sublayer */
-struct rgmii_adv {
- unsigned int pause;
- unsigned int duplex;
- unsigned int lp_pause;
- unsigned int lp_duplex;
-};
-
-#define STMMAC_PCS_PAUSE 1
-#define STMMAC_PCS_ASYM_PAUSE 2
-
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 9a47015254bb..ea33ae39be6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -433,6 +433,12 @@ static int intel_crosststamp(ktime_t *device,
return -ETIMEDOUT;
}
+ *system = (struct system_counterval_t) {
+ .cycles = 0,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = false,
+ };
+
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
@@ -448,7 +454,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
- system->cs_id = CSID_X86_ART;
+
priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 3e86810717d3..32b5d1492e2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -44,24 +44,50 @@
struct ls1x_dwmac {
struct plat_stmmacenet_data *plat_dat;
struct regmap *regmap;
+ unsigned int id;
};
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+struct ls1x_data {
+ int (*setup)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat);
+ int (*init)(struct platform_device *pdev, void *bsp_priv);
+};
+
+static int ls1b_dwmac_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
- struct ls1x_dwmac *dwmac = priv;
- struct plat_stmmacenet_data *plat = dwmac->plat_dat;
- struct regmap *regmap = dwmac->regmap;
+ struct ls1x_dwmac *dwmac = plat_dat->bsp_priv;
struct resource *res;
- unsigned long reg_base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
+ /* This shouldn't fail - stmmac_get_platform_resources()
+ * already mapped this resource.
+ */
dev_err(&pdev->dev, "Could not get IO_MEM resources\n");
return -EINVAL;
}
- reg_base = (unsigned long)res->start;
- if (reg_base == LS1B_GMAC0_BASE) {
+ if (res->start == LS1B_GMAC0_BASE) {
+ dwmac->id = 0;
+ } else if (res->start == LS1B_GMAC1_BASE) {
+ dwmac->id = 1;
+ } else {
+ dev_err(&pdev->dev, "Invalid Ethernet MAC base address %pR",
+ res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+
+ if (dwmac->id == 0) {
switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
regmap_update_bits(regmap, LS1X_SYSCON0,
@@ -80,7 +106,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
}
regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
- } else if (reg_base == LS1B_GMAC1_BASE) {
+ } else if (dwmac->id == 1) {
regmap_update_bits(regmap, LS1X_SYSCON0,
GMAC1_USE_UART1 | GMAC1_USE_UART0,
GMAC1_USE_UART1 | GMAC1_USE_UART0);
@@ -104,10 +130,6 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
}
regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
- } else {
- dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx",
- reg_base);
- return -EINVAL;
}
return 0;
@@ -143,9 +165,9 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
+ const struct ls1x_data *data;
struct regmap *regmap;
struct ls1x_dwmac *dwmac;
- int (*init)(struct platform_device *pdev, void *priv);
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -159,8 +181,8 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
"Unable to find syscon\n");
- init = of_device_get_match_data(&pdev->dev);
- if (!init) {
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
dev_err(&pdev->dev, "No of match data provided\n");
return -EINVAL;
}
@@ -175,21 +197,36 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
"dt configuration failed\n");
plat_dat->bsp_priv = dwmac;
- plat_dat->init = init;
+ plat_dat->init = data->init;
dwmac->plat_dat = plat_dat;
dwmac->regmap = regmap;
+ if (data->setup) {
+ ret = data->setup(pdev, plat_dat);
+ if (ret)
+ return ret;
+ }
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
+static const struct ls1x_data ls1b_dwmac_data = {
+ .setup = ls1b_dwmac_setup,
+ .init = ls1b_dwmac_syscon_init,
+};
+
+static const struct ls1x_data ls1c_dwmac_data = {
+ .init = ls1c_dwmac_syscon_init,
+};
+
static const struct of_device_id ls1x_dwmac_match[] = {
{
.compatible = "loongson,ls1b-gmac",
- .data = &ls1b_dwmac_syscon_init,
+ .data = &ls1b_dwmac_data,
},
{
.compatible = "loongson,ls1c-emac",
- .data = &ls1c_dwmac_syscon_init,
+ .data = &ls1c_dwmac_data,
},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 22653ffd2a04..c0c44916f849 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -41,6 +41,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ plat_dat->mac_interface = PHY_INTERFACE_MODE_NA;
plat_dat->has_gmac = true;
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
@@ -49,9 +50,9 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(reg);
}
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e30bdf72331a..d8fd4d8f6ced 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -622,6 +622,11 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
}
}
+static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable)
+{
+ stmmac_pcs_ctrl_ane(priv, enable, 0, 0);
+}
+
/* On interface toggle MAC registers gets reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
@@ -640,7 +645,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_2500);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ ethqos_pcs_set_inband(priv, false);
break;
case SPEED_1000:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
@@ -648,12 +653,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
@@ -663,7 +668,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index 9a774046455b..df4ca897a60c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -136,6 +136,7 @@ static struct platform_driver renesas_gbeth_driver = {
.probe = renesas_gbeth_probe,
.driver = {
.name = "renesas-gbeth",
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = renesas_gbeth_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 700858ff6f7c..79b92130a03f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -24,12 +24,21 @@
#include "stmmac_platform.h"
struct rk_priv_data;
+
+struct rk_reg_speed_data {
+ unsigned int rgmii_10;
+ unsigned int rgmii_100;
+ unsigned int rgmii_1000;
+ unsigned int rmii_10;
+ unsigned int rmii_100;
+};
+
struct rk_gmac_ops {
void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay);
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
- void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
- void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ int (*set_speed)(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed);
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
@@ -58,7 +67,7 @@ enum rk_clocks_index {
};
struct rk_priv_data {
- struct platform_device *pdev;
+ struct device *dev;
phy_interface_t phy_iface;
int id;
struct regulator *regulator;
@@ -71,7 +80,6 @@ struct rk_priv_data {
struct clk_bulk_data *clks;
int num_clks;
- struct clk *clk_mac;
struct clk *clk_phy;
struct reset_control *phy_reset;
@@ -83,6 +91,64 @@ struct rk_priv_data {
struct regmap *php_grf;
};
+static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
+ const struct rk_reg_speed_data *rsd,
+ unsigned int reg, phy_interface_t interface,
+ int speed)
+{
+ unsigned int val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ if (speed == SPEED_10) {
+ val = rsd->rgmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rgmii_100;
+ } else if (speed == SPEED_1000) {
+ val = rsd->rgmii_1000;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_10) {
+ val = rsd->rmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rmii_100;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else {
+ /* This should never happen, as .get_interfaces() limits
+ * the interface modes that are supported to RGMII and/or
+ * RMII.
+ */
+ return -EINVAL;
+ }
+
+ regmap_write(bsp_priv->grf, reg, val);
+
+ return 0;
+
+}
+
+static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ long rate;
+
+ rate = rgmii_clock(speed);
+ if (rate < 0)
+ return rate;
+
+ return clk_set_rate(clk_mac_speed, rate);
+}
+
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
@@ -177,42 +243,38 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
PX30_GMAC_PHY_INTF_SEL_RMII);
}
-static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int px30_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- int ret;
+ struct device *dev = bsp_priv->dev;
+ unsigned int con1;
+ long rate;
if (!clk_mac_speed) {
dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
- return;
+ return -EINVAL;
}
if (speed == 10) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_10M);
-
- ret = clk_set_rate(clk_mac_speed, 2500000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
- __func__, ret);
+ con1 = PX30_GMAC_SPEED_10M;
+ rate = 2500000;
} else if (speed == 100) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_100M);
-
- ret = clk_set_rate(clk_mac_speed, 25000000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
- __func__, ret);
-
+ con1 = PX30_GMAC_SPEED_100M;
+ rate = 25000000;
} else {
dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return -EINVAL;
}
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1);
+
+ return clk_set_rate(clk_mac_speed, rate);
}
static const struct rk_gmac_ops px30_ops = {
.set_to_rmii = px30_set_to_rmii,
- .set_rmii_speed = px30_set_rmii_speed,
+ .set_speed = px30_set_speed,
};
#define RK3128_GRF_MAC_CON0 0x0168
@@ -261,45 +323,25 @@ static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}
-static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3128_reg_speed_data = {
+ .rgmii_10 = RK3128_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3128_GMAC_CLK_25M,
+ .rgmii_1000 = RK3128_GMAC_CLK_125M,
+ .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M,
+ .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M,
+};
-static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3128_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_2_5M |
- RK3128_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_25M |
- RK3128_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data,
+ RK3128_GRF_MAC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3128_ops = {
.set_to_rgmii = rk3128_set_to_rgmii,
.set_to_rmii = rk3128_set_to_rmii,
- .set_rgmii_speed = rk3128_set_rgmii_speed,
- .set_rmii_speed = rk3128_set_rmii_speed,
+ .set_speed = rk3128_set_speed,
};
#define RK3228_GRF_MAC_CON0 0x0900
@@ -358,37 +400,19 @@ static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
}
-static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3228_reg_speed_data = {
+ .rgmii_10 = RK3228_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3228_GMAC_CLK_25M,
+ .rgmii_1000 = RK3228_GMAC_CLK_125M,
+ .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M,
+ .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M,
+};
-static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3228_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_2_5M |
- RK3228_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_25M |
- RK3228_GMAC_SPEED_100M);
- else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data,
+ RK3228_GRF_MAC_CON1, interface, speed);
}
static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
@@ -402,8 +426,7 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
static const struct rk_gmac_ops rk3228_ops = {
.set_to_rgmii = rk3228_set_to_rgmii,
.set_to_rmii = rk3228_set_to_rmii,
- .set_rgmii_speed = rk3228_set_rgmii_speed,
- .set_rmii_speed = rk3228_set_rmii_speed,
+ .set_speed = rk3228_set_speed,
.integrated_phy_powerup = rk3228_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
@@ -454,45 +477,25 @@ static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
-static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3288_reg_speed_data = {
+ .rgmii_10 = RK3288_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3288_GMAC_CLK_25M,
+ .rgmii_1000 = RK3288_GMAC_CLK_125M,
+ .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M,
+ .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M,
+};
-static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3288_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_2_5M |
- RK3288_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_25M |
- RK3288_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data,
+ RK3288_GRF_SOC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
- .set_rgmii_speed = rk3288_set_rgmii_speed,
- .set_rmii_speed = rk3288_set_rmii_speed,
+ .set_speed = rk3288_set_speed,
};
#define RK3308_GRF_MAC_CON0 0x04a0
@@ -511,24 +514,21 @@ static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3308_GMAC_PHY_INTF_SEL_RMII);
}
-static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
+static const struct rk_reg_speed_data rk3308_reg_speed_data = {
+ .rmii_10 = RK3308_GMAC_SPEED_10M,
+ .rmii_100 = RK3308_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rk3308_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data,
+ RK3308_GRF_MAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rk3308_ops = {
.set_to_rmii = rk3308_set_to_rmii,
- .set_rmii_speed = rk3308_set_rmii_speed,
+ .set_speed = rk3308_set_speed,
};
#define RK3328_GRF_MAC_CON0 0x0900
@@ -590,41 +590,26 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3328_GMAC_RMII_MODE);
}
-static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3328_reg_speed_data = {
+ .rgmii_10 = RK3328_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3328_GMAC_CLK_25M,
+ .rgmii_1000 = RK3328_GMAC_CLK_125M,
+ .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M,
+ .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M,
+};
-static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3328_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
- RK3328_GRF_MAC_CON1;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_2_5M |
- RK3328_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_25M |
- RK3328_GMAC_SPEED_100M);
+ if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy)
+ reg = RK3328_GRF_MAC_CON2;
else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ reg = RK3328_GRF_MAC_CON1;
+
+ return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg,
+ interface, speed);
}
static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
@@ -638,8 +623,7 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
static const struct rk_gmac_ops rk3328_ops = {
.set_to_rgmii = rk3328_set_to_rgmii,
.set_to_rmii = rk3328_set_to_rmii,
- .set_rgmii_speed = rk3328_set_rgmii_speed,
- .set_rmii_speed = rk3328_set_rmii_speed,
+ .set_speed = rk3328_set_speed,
.integrated_phy_powerup = rk3328_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
@@ -690,45 +674,25 @@ static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
}
-static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3366_reg_speed_data = {
+ .rgmii_10 = RK3366_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3366_GMAC_CLK_25M,
+ .rgmii_1000 = RK3366_GMAC_CLK_125M,
+ .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M,
+ .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M,
+};
-static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3366_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_2_5M |
- RK3366_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_25M |
- RK3366_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data,
+ RK3366_GRF_SOC_CON6, interface, speed);
}
static const struct rk_gmac_ops rk3366_ops = {
.set_to_rgmii = rk3366_set_to_rgmii,
.set_to_rmii = rk3366_set_to_rmii,
- .set_rgmii_speed = rk3366_set_rgmii_speed,
- .set_rmii_speed = rk3366_set_rmii_speed,
+ .set_speed = rk3366_set_speed,
};
#define RK3368_GRF_SOC_CON15 0x043c
@@ -777,45 +741,25 @@ static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
}
-static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3368_reg_speed_data = {
+ .rgmii_10 = RK3368_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3368_GMAC_CLK_25M,
+ .rgmii_1000 = RK3368_GMAC_CLK_125M,
+ .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M,
+ .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M,
+};
-static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3368_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_2_5M |
- RK3368_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_25M |
- RK3368_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data,
+ RK3368_GRF_SOC_CON15, interface, speed);
}
static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
- .set_rgmii_speed = rk3368_set_rgmii_speed,
- .set_rmii_speed = rk3368_set_rmii_speed,
+ .set_speed = rk3368_set_speed,
};
#define RK3399_GRF_SOC_CON5 0xc214
@@ -864,45 +808,25 @@ static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
}
-static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3399_reg_speed_data = {
+ .rgmii_10 = RK3399_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3399_GMAC_CLK_25M,
+ .rgmii_1000 = RK3399_GMAC_CLK_125M,
+ .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M,
+ .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M,
+};
-static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3399_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_2_5M |
- RK3399_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_25M |
- RK3399_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data,
+ RK3399_GRF_SOC_CON5, interface, speed);
}
static const struct rk_gmac_ops rk3399_ops = {
.set_to_rgmii = rk3399_set_to_rgmii,
.set_to_rmii = rk3399_set_to_rmii,
- .set_rgmii_speed = rk3399_set_rgmii_speed,
- .set_rmii_speed = rk3399_set_rmii_speed,
+ .set_speed = rk3399_set_speed,
};
#define RK3528_VO_GRF_GMAC_CON 0x0018
@@ -965,43 +889,34 @@ static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3528_GMAC0_CLK_RMII_DIV2);
}
-static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
+static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = {
+ .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2,
+};
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_CLK_RGMII_DIV50);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_CLK_RGMII_DIV5);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_CLK_RGMII_DIV1);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = {
+ .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1,
+ .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2,
+};
-static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3528_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned int reg, val;
+ const struct rk_reg_speed_data *rsd;
+ unsigned int reg;
- if (speed == 10)
- val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
- RK3528_GMAC0_CLK_RMII_DIV20;
- else if (speed == 100)
- val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
- RK3528_GMAC0_CLK_RMII_DIV2;
- else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- return;
+ if (bsp_priv->id == 1) {
+ rsd = &rk3528_gmac1_reg_speed_data;
+ reg = RK3528_VPU_GRF_GMAC_CON5;
+ } else {
+ rsd = &rk3528_gmac0_reg_speed_data;
+ reg = RK3528_VO_GRF_GMAC_CON;
}
- reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
- RK3528_VO_GRF_GMAC_CON;
-
- regmap_write(bsp_priv->grf, reg, val);
+ return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed);
}
static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
@@ -1035,8 +950,7 @@ static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
static const struct rk_gmac_ops rk3528_ops = {
.set_to_rgmii = rk3528_set_to_rgmii,
.set_to_rmii = rk3528_set_to_rmii,
- .set_rgmii_speed = rk3528_set_rgmii_speed,
- .set_rmii_speed = rk3528_set_rmii_speed,
+ .set_speed = rk3528_set_speed,
.set_clock_selection = rk3528_set_clock_selection,
.integrated_phy_powerup = rk3528_integrated_phy_powerup,
.integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
@@ -1098,30 +1012,10 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
}
-static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
static const struct rk_gmac_ops rk3568_ops = {
.set_to_rgmii = rk3568_set_to_rgmii,
.set_to_rmii = rk3568_set_to_rmii,
- .set_rgmii_speed = rk3568_set_gmac_speed,
- .set_rmii_speed = rk3568_set_gmac_speed,
+ .set_speed = rk_set_clk_mac_speed,
.regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
@@ -1205,42 +1099,24 @@ static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
}
-static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned int val = 0, offset_con;
+static const struct rk_reg_speed_data rk3578_reg_speed_data = {
+ .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1,
+ .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20,
+ .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2,
+};
- switch (speed) {
- case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RMII_DIV20;
- else
- val = RK3576_GMAC_CLK_RGMII_DIV50;
- break;
- case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RMII_DIV2;
- else
- val = RK3576_GMAC_CLK_RGMII_DIV5;
- break;
- case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RGMII_DIV1;
- else
- goto err;
- break;
- default:
- goto err;
- }
+static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ unsigned int offset_con;
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
- regmap_write(bsp_priv->grf, offset_con, val);
-
- return;
-err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return rk_set_reg_speed(bsp_priv, &rk3578_reg_speed_data, offset_con,
+ interface, speed);
}
static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
@@ -1262,8 +1138,7 @@ static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3576_ops = {
.set_to_rgmii = rk3576_set_to_rgmii,
.set_to_rmii = rk3576_set_to_rmii,
- .set_rgmii_speed = rk3576_set_gmac_speed,
- .set_rmii_speed = rk3576_set_gmac_speed,
+ .set_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
@@ -1347,26 +1222,26 @@ static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
}
-static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int val = 0, id = bsp_priv->id;
switch (speed) {
case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV20(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV50(id);
break;
case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV2(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV5(id);
break;
case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ if (interface != PHY_INTERFACE_MODE_RMII)
val = RK3588_GMAC_CLK_RGMII_DIV1(id);
else
goto err;
@@ -1377,9 +1252,9 @@ static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
- return;
+ return 0;
err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return -EINVAL;
}
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
@@ -1397,8 +1272,7 @@ static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3588_ops = {
.set_to_rgmii = rk3588_set_to_rgmii,
.set_to_rmii = rk3588_set_to_rmii,
- .set_rgmii_speed = rk3588_set_gmac_speed,
- .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
@@ -1427,26 +1301,21 @@ static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
RV1108_GMAC_PHY_INTF_SEL_RMII);
}
-static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
+static const struct rk_reg_speed_data rv1108_reg_speed_data = {
+ .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M,
+ .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_2_5M |
- RV1108_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_25M |
- RV1108_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rv1108_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data,
+ RV1108_GRF_GMAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rv1108_ops = {
.set_to_rmii = rv1108_set_to_rmii,
- .set_rmii_speed = rv1108_set_rmii_speed,
+ .set_speed = rv1108_set_speed,
};
#define RV1126_GRF_GMAC_CON0 0X0070
@@ -1501,62 +1370,17 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
RV1126_GMAC_PHY_INTF_SEL_RMII);
}
-static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
-static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- default:
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
static const struct rk_gmac_ops rv1126_ops = {
.set_to_rgmii = rv1126_set_to_rgmii,
.set_to_rmii = rv1126_set_to_rmii,
- .set_rgmii_speed = rv1126_set_rgmii_speed,
- .set_rmii_speed = rv1126_set_rmii_speed,
+ .set_speed = rk_set_clk_mac_speed,
};
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
- struct device *dev = &bsp_priv->pdev->dev;
int phy_iface = bsp_priv->phy_iface;
+ struct device *dev = bsp_priv->dev;
int i, j, ret;
bsp_priv->clk_enabled = false;
@@ -1583,16 +1407,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
if (ret)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
- /* "stmmaceth" will be enabled by the core */
- bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
- ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
- if (ret)
- return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
-
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- clk_set_rate(bsp_priv->clk_mac, 50000000);
+ clk_set_rate(plat->stmmac_clk, 50000000);
}
if (plat->phy_node && bsp_priv->integrated_phy) {
@@ -1648,8 +1466,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
{
struct regulator *ldo = bsp_priv->regulator;
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
if (enable) {
ret = regulator_enable(ldo);
@@ -1773,7 +1591,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
dev_info(dev, "integrated PHY? (%s).\n",
bsp_priv->integrated_phy ? "yes" : "no");
- bsp_priv->pdev = pdev;
+ bsp_priv->dev = dev;
return bsp_priv;
}
@@ -1793,7 +1611,7 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
return -EINVAL;
break;
default:
- dev_err(&bsp_priv->pdev->dev,
+ dev_err(bsp_priv->dev,
"unsupported interface %d", bsp_priv->phy_iface);
}
return 0;
@@ -1801,8 +1619,8 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
ret = rk_gmac_check_ops(bsp_priv);
if (ret)
@@ -1858,35 +1676,34 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
gmac->ops->integrated_phy_powerdown(gmac);
- pm_runtime_put_sync(&gmac->pdev->dev);
+ pm_runtime_put_sync(gmac->dev);
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
+static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
+{
+ struct rk_priv_data *rk = bsp_priv;
+
+ if (rk->ops->set_to_rgmii)
+ phy_interface_set_rgmii(interfaces);
+
+ if (rk->ops->set_to_rmii)
+ __set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
+}
+
static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
phy_interface_t interface, int speed)
{
struct rk_priv_data *bsp_priv = bsp_priv_;
- struct device *dev = &bsp_priv->pdev->dev;
- switch (bsp_priv->phy_iface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (bsp_priv->ops->set_rgmii_speed)
- bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
- break;
- case PHY_INTERFACE_MODE_RMII:
- if (bsp_priv->ops->set_rmii_speed)
- bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
- break;
- default:
- dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
- }
+ if (bsp_priv->ops->set_speed)
+ return bsp_priv->ops->set_speed(bsp_priv, bsp_priv->phy_iface,
+ speed);
- return 0;
+ return -EINVAL;
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1919,6 +1736,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->tx_fifo_size = 2048;
}
+ plat_dat->get_interfaces = rk_get_interfaces;
plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
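For reference, the per-SoC set_speed callbacks above all delegate to two shared helpers, rk_set_reg_speed() and rk_set_clk_mac_speed(), which are introduced earlier in dwmac-rk.c and are not visible in this excerpt. The sketch below shows what rk_set_reg_speed() could plausibly look like, inferred only from the struct fields and the call signature used above; it illustrates the lookup-table pattern and is not the actual helper.

/* Illustrative sketch only -- the real helper is defined earlier in dwmac-rk.c. */
static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
			    const struct rk_reg_speed_data *rsd,
			    unsigned int reg, phy_interface_t interface,
			    int speed)
{
	unsigned int val;

	if (phy_interface_mode_is_rgmii(interface)) {
		if (speed == 10)
			val = rsd->rgmii_10;
		else if (speed == 100)
			val = rsd->rgmii_100;
		else if (speed == 1000)
			val = rsd->rgmii_1000;
		else
			return -EINVAL;
	} else if (interface == PHY_INTERFACE_MODE_RMII) {
		if (speed == 10)
			val = rsd->rmii_10;
		else if (speed == 100)
			val = rsd->rmii_100;
		else
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/* One GRF write replaces the old per-speed if/else ladders. */
	regmap_write(bsp_priv->grf, reg, val);

	return 0;
}

With this shape, each SoC callback such as rk3328_set_speed() only has to pick the right GRF register (and, on rk3528, the right speed table) before delegating.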
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 72b50f6d72f4..01dd0cf0923c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -515,6 +515,7 @@ static const struct socfpga_dwmac_ops socfpga_gen10_ops = {
static const struct of_device_id socfpga_dwmac_match[] = {
{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops },
{ .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops },
+ { .compatible = "altr,socfpga-stmmac-agilex5", .data = &socfpga_gen10_ops },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
index 3303784cbbf8..3b7947a7a7ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -54,6 +54,7 @@ static int sophgo_dwmac_probe(struct platform_device *pdev)
}
static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2042-dwmac" },
{ .compatible = "sophgo,sg2044-dwmac" },
{ /* sentinel */ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 5e6ac82a89b9..bd65d4239054 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -48,7 +48,6 @@
struct visconti_eth {
void __iomem *reg;
- u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
};
@@ -57,42 +56,35 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed)
{
struct visconti_eth *dwmac = bsp_priv;
- struct net_device *netdev = dev_get_drvdata(dwmac->dev);
- unsigned int val, clk_sel_val = 0;
-
- switch (speed) {
- case SPEED_1000:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
- break;
- case SPEED_100:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- break;
- case SPEED_10:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- break;
- default:
- /* No bit control */
- netdev_err(netdev, "Unsupported speed request (%d)", speed);
- return -EINVAL;
- }
-
- /* Stop internal clock */
- val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
- val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
- val |= ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ unsigned long clk_sel, val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ switch (speed) {
+ case SPEED_1000:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_25M;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- /* Set Clock-Mux, Start clock, Set TX_O direction */
- switch (dwmac->phy_intf_sel) {
- case ETHER_CONFIG_INTF_RGMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
@@ -100,11 +92,32 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_RMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ switch (speed) {
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_2;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_20;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RMII_CLK_RST;
@@ -112,16 +125,22 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_MII:
- default:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ } else {
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC |
+ ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
}
return 0;
@@ -130,28 +149,28 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
{
struct visconti_eth *dwmac = plat_dat->bsp_priv;
- unsigned int reg_val, clk_sel_val;
+ unsigned int clk_sel_val;
+ u32 phy_intf_sel;
switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
+ phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
+ phy_intf_sel = ETHER_CONFIG_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
+ phy_intf_sel = ETHER_CONFIG_INTF_RMII;
break;
default:
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
return -EOPNOTSUPP;
}
- reg_val = dwmac->phy_intf_sel;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
/* Enable TX/RX clock */
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
@@ -161,8 +180,8 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac
dwmac->reg + REG_ETHER_CLOCK_SEL);
/* release internal-reset */
- reg_val |= ETHER_ETH_CONTROL_RESET;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ phy_intf_sel |= ETHER_ETH_CONTROL_RESET;
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
return 0;
}
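For orientation, the clock selections in the rewritten visconti_eth_set_clk_tx_rate() correspond to the usual transmit-clock rates for each link speed. The summary below is a reading aid based on the macro names and standard RGMII/RMII clocking, not text taken from the driver headers.

/*
 * RGMII:  ETHER_CLK_SEL_FREQ_SEL_125M -> 125 MHz   (1000 Mb/s)
 *         ETHER_CLK_SEL_FREQ_SEL_25M  ->  25 MHz   ( 100 Mb/s)
 *         ETHER_CLK_SEL_FREQ_SEL_2P5M -> 2.5 MHz   (  10 Mb/s)
 * RMII:   ETHER_CLK_SEL_DIV_SEL_2     -> presumably 50 MHz / 2  = 25 MHz  (100 Mb/s)
 *         ETHER_CLK_SEL_DIV_SEL_20    -> presumably 50 MHz / 20 = 2.5 MHz ( 10 Mb/s)
 * MII:    no speed-dependent selection; the RX/TX clocks come from the PHY.
 */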
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 56b76aaa58f0..fe776ddf6889 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -393,15 +393,10 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback)
+static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane,
+ bool srgmi_ral, bool loopback)
{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
-{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -508,7 +503,6 @@ const struct stmmac_ops dwmac1000_ops = {
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
- .pcs_get_adv_lp = dwmac1000_get_adv_lp,
.set_mac_loopback = dwmac1000_set_mac_loopback,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 9c2549d4100f..d85bc0bb5c3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -583,15 +583,10 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
}
-static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral,
bool loopback)
{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
-{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
/* RGMII or SMII interface */
@@ -958,7 +953,6 @@ const struct stmmac_ops dwmac4_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
@@ -993,7 +987,6 @@ const struct stmmac_ops dwmac410_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
@@ -1030,7 +1023,6 @@ const struct stmmac_ops dwmac510_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.safety_feat_config = dwmac5_safety_feat_config,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 7840bc403788..5dcc95bc0ad2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & XGMAC_NIS)) {
- if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_normal_irq_n[chan]);
- u64_stats_update_end(&stats->syncp);
- ret |= handle_rx;
- }
- if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_normal_irq_n[chan]);
- u64_stats_update_end(&stats->syncp);
- ret |= handle_tx;
- }
+ if (likely(intr_status & XGMAC_RI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
}
/* Clear interrupts */
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ae4efffb785f..14dbe0685997 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -300,7 +300,6 @@ struct stmmac_dma_ops {
struct mac_device_info;
struct net_device;
-struct rgmii_adv;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
@@ -375,9 +374,8 @@ struct stmmac_ops {
struct stmmac_extra_stats *x, u32 rx_queues,
u32 tx_queues);
/* PCS calls */
- void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, bool srgmi_ral,
bool loopback);
- void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
/* Safety Features */
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_feature_cfg *safety_cfg);
@@ -466,9 +464,7 @@ struct stmmac_ops {
#define stmmac_mac_debug(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
-#define stmmac_pcs_get_adv_lp(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
+ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __priv, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
stmmac_do_callback(__priv, mac, safety_feat_config, __args)
#define stmmac_safety_feat_irq_status(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f702f7b7bf9f..77758a7299b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -325,7 +325,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
(priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII)) {
- struct rgmii_adv adv;
u32 supported, advertising, lp_advertising;
if (!priv->xstats.pcs_link) {
@@ -337,10 +336,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.speed = priv->xstats.pcs_speed;
- /* Get and convert ADV/LP_ADV from the HW AN registers */
- if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
- return -EOPNOTSUPP; /* should never happen indeed */
-
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
ethtool_convert_link_mode_to_legacy_u32(
@@ -350,44 +345,12 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
ethtool_convert_link_mode_to_legacy_u32(
&lp_advertising, cmd->link_modes.lp_advertising);
- if (adv.pause & STMMAC_PCS_PAUSE)
- advertising |= ADVERTISED_Pause;
- if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
- advertising |= ADVERTISED_Asym_Pause;
- if (adv.lp_pause & STMMAC_PCS_PAUSE)
- lp_advertising |= ADVERTISED_Pause;
- if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
- lp_advertising |= ADVERTISED_Asym_Pause;
-
/* Reg49[3] always set because ANE is always supported */
cmd->base.autoneg = ADVERTISED_Autoneg;
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
lp_advertising |= ADVERTISED_Autoneg;
- if (adv.duplex) {
- supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Full);
- advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- } else {
- supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_100baseT_Half |
- SUPPORTED_10baseT_Half);
- advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- }
- if (adv.lp_duplex)
- lp_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- else
- lp_advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
cmd->base.port = PORT_OTHER;
ethtool_convert_legacy_u32_to_link_mode(
@@ -417,7 +380,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
return -EINVAL;
mutex_lock(&priv->lock);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
mutex_unlock(&priv->lock);
return 0;
@@ -515,12 +478,9 @@ stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
+ if (priv->hw->pcs) {
pause->autoneg = 1;
- if (!adv_lp.pause)
- return;
} else {
phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
@@ -531,12 +491,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
+ if (priv->hw->pcs) {
pause->autoneg = 1;
- if (!adv_lp.pause)
- return -EOPNOTSUPP;
return 0;
} else {
return phylink_ethtool_set_pauseparam(priv->phylink, pause);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b948df1bff9a..f1abf4242cd2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1062,8 +1062,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
interface, speed);
if (ret < 0)
netdev_err(priv->dev,
- "failed to configure transmit clock for %dMbps: %pe\n",
- speed, ERR_PTR(ret));
+ "failed to configure %s transmit clock for %dMbps: %pe\n",
+ phy_modes(interface), speed, ERR_PTR(ret));
}
stmmac_mac_set(priv, priv->ioaddr, true);
@@ -2596,7 +2596,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
budget = min(budget, stmmac_tx_avail(priv, queue));
- while (budget-- > 0) {
+ for (; budget > 0; budget--) {
struct stmmac_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
@@ -3586,7 +3586,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index 1bdf87b237c4..4a684c97dfae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -16,6 +16,8 @@
/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
#define GMAC_AN_CTRL(x) (x) /* AN control */
#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+
+/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */
#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partener ability */
#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
@@ -107,34 +109,4 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
writel(value, ioaddr + GMAC_AN_CTRL(reg));
}
-
-/**
- * dwmac_get_adv_lp - Get ADV and LP cap
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @adv_lp: structure to store the adv,lp status
- * Description: this is to expose the ANE advertisement and Link partner ability
- * status to ethtool support.
- */
-static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
- struct rgmii_adv *adv_lp)
-{
- u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->duplex |= DUPLEX_HALF;
-
- adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->lp_duplex = DUPLEX_HALF;
-
- adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-}
#endif /* __STMMAC_PCS_H__ */
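Taken together, the hwif.h, dwmac1000/dwmac4 and caller changes above move pcs_ctrl_ane() from a raw ioaddr to the stmmac_priv pointer, with the stmmac_pcs_ctrl_ane() wrapper now injecting __priv itself. A before/after of the call path, assuming the usual expansion of stmmac_do_void_callback() through priv->hw->mac:

/* before */
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
	/* -> priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, priv->hw->ps, 0) */

/* after */
stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
	/* -> priv->hw->mac->pcs_ctrl_ane(priv, 1, priv->hw->ps, 0),
	 *    and the implementation reads priv->ioaddr itself */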
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b80c1efdb323..030fcf1b5993 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -60,7 +60,7 @@ static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -410,6 +410,7 @@ static const char * const stmmac_gmac4_compats[] = {
"snps,dwmac-4.00",
"snps,dwmac-4.10a",
"snps,dwmac-4.20a",
+ "snps,dwmac-5.00a",
"snps,dwmac-5.10a",
"snps,dwmac-5.20",
"snps,dwmac-5.30a",
@@ -579,6 +580,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
if (of_property_read_bool(np, "snps,tso"))
plat->flags |= STMMAC_FLAG_TSO_EN;
+ of_property_read_u32(np, "snps,multicast-filter-bins",
+ &plat->multicast_filter_bins);
}
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ddca8fc7883e..893216b0e08d 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (!addr) {
+ if (np->ops->mapping_error(np->device, addr)) {
__free_page(page);
return -ENOMEM;
}
@@ -5825,7 +5825,7 @@ static int niu_init_mac(struct niu *np)
/* This looks hookey but the RX MAC reset we just did will
* undo some of the state we setup in niu_init_tx_mac() so we
* have to call it again. In particular, the RX MAC reset will
- * set the XMAC_MAX register back to it's default value.
+ * set the XMAC_MAX register back to its default value.
*/
niu_init_tx_mac(np);
niu_enable_tx_mac(np, 1);
@@ -6676,6 +6676,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_headlen(skb);
mapping = np->ops->map_single(np->device, skb->data,
len, DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_drop;
prod = rp->prod;
@@ -6717,6 +6719,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
skb_frag_off(frag), len,
DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_unmap;
rp->tx_buffs[prod].skb = NULL;
rp->tx_buffs[prod].mapping = mapping;
@@ -6741,6 +6745,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
out:
return NETDEV_TX_OK;
+out_unmap:
+ while (i--) {
+ const skb_frag_t *frag;
+
+ prod = PREVIOUS_TX(rp, prod);
+ frag = &skb_shinfo(skb)->frags[i];
+ np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
out_drop:
rp->tx_errors++;
kfree_skb(skb);
@@ -7077,8 +7094,10 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
}
-static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
nfc->data = 0;
@@ -7290,9 +7309,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = niu_get_hash_opts(np, cmd);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = np->num_rx_rings;
break;
@@ -7313,8 +7329,11 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
u64 flow_key = 0;
unsigned long flags;
@@ -7656,9 +7675,6 @@ static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = niu_set_hash_opts(np, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = niu_add_ethtool_tcam_entry(np, cmd);
break;
@@ -7912,6 +7928,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_rxfh_fields = niu_get_rxfh_fields,
+ .set_rxfh_fields = niu_set_rxfh_fields,
.get_link_ksettings = niu_get_link_ksettings,
.set_link_ksettings = niu_set_link_ksettings,
};
@@ -9644,6 +9662,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
dma_unmap_single(dev, dma_address, size, direction);
}
+static int niu_pci_mapping_error(struct device *dev, u64 addr)
+{
+ return dma_mapping_error(dev, addr);
+}
+
static const struct niu_ops niu_pci_ops = {
.alloc_coherent = niu_pci_alloc_coherent,
.free_coherent = niu_pci_free_coherent,
@@ -9651,6 +9674,7 @@ static const struct niu_ops niu_pci_ops = {
.unmap_page = niu_pci_unmap_page,
.map_single = niu_pci_map_single,
.unmap_single = niu_pci_unmap_single,
+ .mapping_error = niu_pci_mapping_error,
};
static void niu_driver_version(void)
@@ -10019,6 +10043,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
/* Nothing to do. */
}
+static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
+{
+ return false;
+}
+
static const struct niu_ops niu_phys_ops = {
.alloc_coherent = niu_phys_alloc_coherent,
.free_coherent = niu_phys_free_coherent,
@@ -10026,6 +10055,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_page = niu_phys_unmap_page,
.map_single = niu_phys_map_single,
.unmap_single = niu_phys_unmap_single,
+ .mapping_error = niu_phys_mapping_error,
};
static int niu_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
index 04c215f91fc0..d8368043fc3b 100644
--- a/drivers/net/ethernet/sun/niu.h
+++ b/drivers/net/ethernet/sun/niu.h
@@ -2879,6 +2879,9 @@ struct tx_ring_info {
#define NEXT_TX(tp, index) \
(((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+#define PREVIOUS_TX(tp, index) \
+ (((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
+
static inline u32 niu_tx_avail(struct tx_ring_info *tp)
{
return (tp->pending -
@@ -3140,6 +3143,7 @@ struct niu_ops {
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction);
+ int (*mapping_error)(struct device *dev, u64 dma_address);
};
struct niu_link_config {
@@ -3246,8 +3250,8 @@ struct niu {
struct niu_parent *parent;
u32 flags;
-#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/
-#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected*/
+#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
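The PREVIOUS_TX() macro added above is the inverse of NEXT_TX() and is what allows the new niu_start_xmit() error path to walk back over fragments it has already mapped. A small worked example with illustrative values:

/*
 * With tp->pending == 4:
 *   NEXT_TX:     0 -> 1 -> 2 -> 3 -> 0   (wraps forward past pending - 1)
 *   PREVIOUS_TX: 0 -> 3 -> 2 -> 1 -> 0   (wraps backward to pending - 1)
 * so the out_unmap loop can step back from the descriptor that failed to
 * map, unmap each fragment, and finally unmap the linear head buffer.
 */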
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 4bc0e114d5ee..48f0a96c0e9e 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -451,7 +451,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
/* Auto negotiation. The scheme is very simple. We have a timer routine
* that keeps watching the auto negotiation process as it progresses.
* The DP83840 is first told to start doing it's thing, we set up the time
- * and place the timer state machine in it's initial state.
+ * and place the timer state machine in its initial state.
*
* Here the timer peeks at the DP83840 status registers at each click to see
* if the auto negotiation has completed, we assume here that the DP83840 PHY
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 0daed05b7c83..300631e8ac0d 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -36,7 +36,7 @@
#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
-/* In MACE mode, there are four qe channels. Each channel has it's own
+/* In MACE mode, there are four qe channels. Each channel has its own
* status bits in the QEC status register. This macro picks out the
* ones you want.
*/
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index fc77f424f90b..2cee1f05ac47 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -276,7 +276,7 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
* currently intrs are disabled (since we read ISR),
* and we have failed to register next poll.
* so we read the regs to trigger chip
- * and allow further interupts. */
+ * and allow further interrupts. */
READ_REG(priv, regTXF_WPTR_0);
READ_REG(priv, regRXD_WPTR_0);
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index f20d1ff192ef..ecd6ecac87bb 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -856,8 +856,6 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
{
struct sk_buff *skb;
- len += AM65_CPSW_HEADROOM;
-
skb = build_skb(page_addr, len);
if (unlikely(!skb))
return NULL;
@@ -1344,7 +1342,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
}
skb = am65_cpsw_build_skb(page_addr, ndev,
- AM65_CPSW_MAX_PACKET_SIZE, headroom);
+ PAGE_SIZE, headroom);
if (unlikely(!skb)) {
new_page = page;
goto requeue;
@@ -2602,6 +2600,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return -ENOENT;
for_each_child_of_node(node, port_np) {
+ phy_interface_t phy_if;
struct am65_cpsw_port *port;
u32 port_id;
@@ -2667,14 +2666,36 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
port->slave.port_np = of_node_get(port_np);
- ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ ret = of_get_phy_mode(port_np, &phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
goto of_node_put;
}
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ /* CPSW controllers supported by this driver have a fixed
+ * internal TX delay in RGMII mode. Fix up PHY mode to account
+ * for this and warn about Device Trees that claim to have a TX
+ * delay on the PCB.
+ */
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_warn(dev,
+ "RGMII mode without internal TX delay unsupported; please fix your Device Tree\n");
+ break;
+ default:
+ break;
+ }
+
+ port->slave.phy_if = phy_if;
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, phy_if);
if (ret)
goto of_node_put;
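The phy-mode fixup inserted above amounts to a small mapping: these CPSW ports always add the TX delay internally, so any TX delay the Device Tree asks the PHY to provide is dropped, and modes that assume no internal TX delay can only be warned about. Summarised as a reading aid, not additional driver code:

/*
 *   DT "rgmii-id"   -> handled as "rgmii-rxid"  (PHY keeps only the RX delay)
 *   DT "rgmii-txid" -> handled as "rgmii"       (PHY adds no delay at all)
 *   DT "rgmii" / "rgmii-rxid" -> left as-is but warned about, since they
 *                                claim the TX delay is provided on the board
 *                                while the MAC's internal TX delay is fixed
 */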
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index dbbea9146040..2ba4c8795d60 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -181,7 +181,7 @@ void cpts_misc_interrupt(struct cpts *cpts)
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
-static u64 cpts_systim_read(const struct cyclecounter *cc)
+static u64 cpts_systim_read(struct cyclecounter *cc)
{
struct cpts *cpts = container_of(cc, struct cpts, cc);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index ddfd1c02a885..da53eb04b0a4 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
int i;
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initialized */
- for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+
+ /* Configure buffer pools for forwarding buffers
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
-
- for (i = PRUETH_NUM_BUF_POOLS;
- i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
- i++) {
- /* The driver only uses first 4 queues per PRU so only initialize them */
- if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
- addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* The driver only uses first 4 queues per PRU,
+ * so only initialize buffer for them
+ */
+ if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
+ < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_SW_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_SW_LI_BUF_POOL_SIZE;
} else {
- writel(0, &bpool_cfg[i].addr);
- writel(0, &bpool_cfg[i].len);
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
}
}
- if (!slice)
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to the host core
+ */
rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
- writel(addr - SZ_2K, &rxq_ctx->end);
+ addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
return 0;
}
@@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
u32 addr;
int i;
- /* Layout to have 64KB aligned buffer pool
- * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
- */
-
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -361,39 +388,66 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initilalized */
- writel(addr, &bpool_cfg[0].addr);
- writel(0, &bpool_cfg[0].len);
- for (i = PRUETH_EMAC_BUF_POOL_START;
- i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
- i++) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ /* Configure buffer pools for forwarding buffers
+ * - in mac mode - no forwarding so initialize all pools to 0
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* In EMAC mode, only first 4 buffers are used,
+ * as 1 slice needs to handle only 1 port
+ */
+ if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
+ } else {
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
+ }
+ }
- /* Pre-emptible RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
- /* Express RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
+
return 0;
}
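Both setup functions above carve the shared MSMC/OC-SRAM region into the same sequence of areas for each slice, differing only in which pools get a non-zero size. A sketch of the resulting per-slice layout, derived from the order of the writes above and shown purely for orientation:

/*
 * base (64 KiB aligned; slice 1 starts one bank or one per-slice total higher):
 *   [ 8 forwarding buffer pools  ]  sized only in switch mode
 *   [ 16 local-injection pools   ]  only the pools actually used (4 per port) are sized
 *   [ express RX queue           ]  host egress, express traffic
 *   [ preemptible RX queue       ]  host egress, preemptible traffic
 *   [ default drop slot          ]  scratch area for packets to be dropped
 */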
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index c884e9fa099e..60d69744ffae 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -26,21 +26,71 @@ struct icssg_flow_cfg {
#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */
#define PRUETH_RX_FLOW_DATA 0
-#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K
-#define PRUETH_EMAC_POOLS_PER_SLICE 24
-#define PRUETH_EMAC_BUF_POOL_START 8
-#define PRUETH_NUM_BUF_POOLS 8
-#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
-#define MSMC_RAM_SIZE \
- (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
- PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
-
-#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K
-#define PRUETH_SW_NUM_BUF_POOLS_HOST 8
-#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
-#define MSMC_RAM_SIZE_SWITCH_MODE \
- (MSMC_RAM_SIZE + \
- (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+/* Defines for forwarding path buffer pools:
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ * - only used in switch mode (as no forwarding in mac mode)
+ */
+#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K)
+
+/* Defines for local injection path buffer pools:
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ * - 8 pools per port per slice and each slice handles both ports
+ * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
+ * - switch mode: 8 total pools used
+ * - mac mode: 4 total pools used
+ */
+#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16
+#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8
+#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+
+/* Defines for host egress path - express and preemptible buffers
+ * - used by firmware to store express and preemptible packets
+ * to be transmitted to host core
+ * - used by both mac/switch modes
+ */
+#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K
+#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K)
+#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
+#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
+
+/* Buffer used by firmware to temporarily store packet to be dropped */
+#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K
+#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE
+
+/* Total switch mode memory usage for buffers per slice */
+#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_SW_DROP_PKT_BUF_SIZE)
+
+/* Total switch mode memory usage for all buffers */
+#define PRUETH_SW_TOTAL_BUF_SIZE \
+ (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Total mac mode memory usage for buffers per slice */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_EMAC_LI_BUF_POOL_SIZE * \
+ PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_EMAC_DROP_PKT_BUF_SIZE)
+
+/* Total mac mode memory usage for all buffers */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE \
+ (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Size of 1 bank of MSMC/OC_SRAM memory */
+#define MSMC_RAM_BANK_SIZE SZ_256K
#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
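
As a quick sanity check of the arithmetic encoded by these defines, a minimal sketch (not part of the patch), assuming the SZ_* constants from <linux/sizes.h> and static_assert() from <linux/build_bug.h>:

/* EMAC: 4 x 8K LI pools + 16K express + 14K preemptible + 2K drop = 64K */
static_assert(PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE == SZ_64K);
/* Switch: 8 x 8K fwd + 8 x 4K LI + 16K + 14K + 2K = 128K per slice */
static_assert(PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE == SZ_128K);
/* Both slices of switch-mode buffers fit within two MSMC banks */
static_assert(PRUETH_SW_TOTAL_BUF_SIZE <= 2 * MSMC_RAM_BANK_SIZE);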
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 86fc1278127c..2b973d6e2341 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -125,45 +125,6 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct icssg_firmwares icssg_hsr_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
- }
-};
-
-static struct icssg_firmwares icssg_switch_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
- }
-};
-
-static struct icssg_firmwares icssg_emac_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
- }
-};
-
static int prueth_start(struct rproc *rproc, const char *fw_name)
{
int ret;
@@ -186,11 +147,13 @@ static int prueth_emac_start(struct prueth *prueth)
int ret, slice;
if (prueth->is_switch_mode)
- firmwares = icssg_switch_firmwares;
- else if (prueth->is_hsr_offload_mode)
- firmwares = icssg_hsr_firmwares;
+ firmwares = prueth->icssg_switch_firmwares;
+ else if (prueth->is_hsr_offload_mode && HSR_V1 == prueth->hsr_prp_version)
+ firmwares = prueth->icssg_hsr_firmwares;
+ else if (prueth->is_hsr_offload_mode && PRP_V1 == prueth->hsr_prp_version)
+ firmwares = prueth->icssg_prp_firmwares;
else
- firmwares = icssg_emac_firmwares;
+ firmwares = prueth->icssg_emac_firmwares;
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
@@ -1566,6 +1529,7 @@ static int prueth_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info;
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ enum hsr_version hsr_ndev_version;
int ret = NOTIFY_DONE;
if (ndev->netdev_ops != &emac_netdev_ops)
@@ -1577,6 +1541,11 @@ static int prueth_netdevice_event(struct notifier_block *unused,
if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
is_hsr_master(info->upper_dev)) {
+ hsr_get_version(info->upper_dev, &hsr_ndev_version);
+ if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
+ return -EOPNOTSUPP;
+ prueth->hsr_prp_version = hsr_ndev_version;
+
if (info->linking) {
if (!prueth->hsr_dev) {
prueth->hsr_dev = info->upper_dev;
@@ -1632,6 +1601,87 @@ static void prueth_unregister_notifiers(struct prueth *prueth)
unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}
+static void icssg_read_firmware_names(struct device_node *np,
+ struct icssg_firmwares *fw)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ of_property_read_string_index(np, "firmware-name", i * 3 + 0,
+ &fw[i].pru);
+ of_property_read_string_index(np, "firmware-name", i * 3 + 1,
+ &fw[i].rtu);
+ of_property_read_string_index(np, "firmware-name", i * 3 + 2,
+ &fw[i].txpru);
+ }
+}
+
+/* icssg_firmware_name_replace - Replace a substring in firmware name
+ * @dev: device pointer for memory allocation
+ * @src: source firmware name string
+ * @from: substring to replace
+ * @to: replacement substring
+ *
+ * Return: a newly allocated string with the replacement, or the original
+ * string if replacement is not possible.
+ */
+static const char *icssg_firmware_name_replace(struct device *dev,
+ const char *src,
+ const char *from,
+ const char *to)
+{
+ size_t prefix, from_len, to_len, total;
+ const char *p = strstr(src, from);
+ char *buf;
+
+ if (!p)
+ return src; /* fallback: no replacement, use original */
+
+ prefix = p - src;
+ from_len = strlen(from);
+ to_len = strlen(to);
+ total = strlen(src) - from_len + to_len + 1;
+
+ buf = devm_kzalloc(dev, total, GFP_KERNEL);
+ if (!buf)
+ return src; /* fallback: allocation failed, use original */
+
+ strscpy(buf, src, prefix + 1);
+ strscpy(buf + prefix, to, to_len + 1);
+ strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
+
+ return buf;
+}
+
+/**
+ * icssg_mode_firmware_names - Generate firmware names for a specific mode
+ * @dev: device pointer for logging and context
+ * @src: source array of firmware name structures
+ * @dst: destination array to store updated firmware name structures
+ * @from: substring in firmware names to be replaced
+ * @to: substring to replace @from in firmware names
+ *
+ * Iterates over all MACs and replaces occurrences of the @from substring
+ * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
+ * updated firmware names are stored in the @dst array.
+ */
+static void icssg_mode_firmware_names(struct device *dev,
+ struct icssg_firmwares *src,
+ struct icssg_firmwares *dst,
+ const char *from, const char *to)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
+ from, to);
+ dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
+ from, to);
+ dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
+ from, to);
+ }
+}
+
static int prueth_probe(struct platform_device *pdev)
{
struct device_node *eth_node, *eth_ports_node;
@@ -1764,10 +1814,15 @@ static int prueth_probe(struct platform_device *pdev)
goto put_mem;
}
- msmc_ram_size = MSMC_RAM_SIZE;
prueth->is_switchmode_supported = prueth->pdata.switch_mode;
- if (prueth->is_switchmode_supported)
- msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
+ if (prueth->pdata.banked_ms_ram) {
+ /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
+ msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
+ } else {
+ msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
+ }
/* NOTE: FW bug needs buffer base to be 64KB aligned */
prueth->msmcram.va =
@@ -1808,6 +1863,17 @@ static int prueth_probe(struct platform_device *pdev)
icss_iep_init_fw(prueth->iep1);
}
+ /* Read EMAC firmware names from device tree */
+ icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
+
+ /* Generate other mode firmware names based on EMAC firmware names */
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_switch_firmwares, "eth", "sw");
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_hsr_firmwares, "eth", "hsr");
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_prp_firmwares, "eth", "prp");
+
spin_lock_init(&prueth->vtbl_lock);
spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
@@ -1924,7 +1990,8 @@ put_iep0:
free_pool:
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va, msmc_ram_size);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
put_mem:
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -1976,8 +2043,8 @@ static void prueth_remove(struct platform_device *pdev)
icss_iep_put(prueth->iep0);
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va,
- MSMC_RAM_SIZE);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -1994,12 +2061,14 @@ static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 0,
};
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 1,
};
static const struct of_device_id prueth_dt_match[] = {
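
To make the renaming concrete, a hypothetical walk-through of icssg_mode_firmware_names() using a name from the removed am65x-sr2 EMAC table (an illustration only; the actual input names come from the device tree firmware-name property):

/* from = "eth", to = "sw":
 *   "ti-pruss/am65x-sr2-pru0-prueth-fw.elf"
 *     -> "ti-pruss/am65x-sr2-pru0-prusw-fw.elf"
 * strstr() locates the first "eth" and icssg_firmware_name_replace()
 * rebuilds the string around it; the "hsr" and "prp" names are produced
 * the same way from the same EMAC name.
 */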
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 23c465f1ce7f..ca8a22a4a5da 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -13,6 +13,7 @@
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
+#include <linux/if_hsr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -251,17 +252,19 @@ struct prueth_emac {
* @fdqring_mode: Free desc queue mode
* @quirk_10m_link_issue: 10M link detect errata
* @switch_mode: switch firmware support
+ * @banked_ms_ram: banked memory support
*/
struct prueth_pdata {
enum k3_ring_mode fdqring_mode;
u32 quirk_10m_link_issue:1;
u32 switch_mode:1;
+ u32 banked_ms_ram:1;
};
struct icssg_firmwares {
- char *pru;
- char *rtu;
- char *txpru;
+ const char *pru;
+ const char *rtu;
+ const char *txpru;
};
/**
@@ -290,6 +293,7 @@ struct icssg_firmwares {
* @vlan_tbl: VLAN-FID table pointer
* @hw_bridge_dev: pointer to HW bridge net device
* @hsr_dev: pointer to the HSR net device
+ * @hsr_prp_version: enum to store the protocol version of hsr master
* @br_members: bitmask of bridge member ports
* @hsr_members: bitmask of hsr member ports
* @prueth_netdevice_nb: netdevice notifier block
@@ -300,6 +304,10 @@ struct icssg_firmwares {
* @is_switchmode_supported: indicates platform support for switch mode
* @switch_id: ID for mapping switch ports to bridge
* @default_vlan: Default VLAN for host
+ * @icssg_emac_firmwares: Firmware names for EMAC mode, indexed per MAC
+ * @icssg_switch_firmwares: Firmware names for SWITCH mode, indexed per MAC
+ * @icssg_hsr_firmwares: Firmware names for HSR mode, indexed per MAC
+ * @icssg_prp_firmwares: Firmware names for PRP mode, indexed per MAC
*/
struct prueth {
struct device *dev;
@@ -329,6 +337,7 @@ struct prueth {
struct net_device *hw_bridge_dev;
struct net_device *hsr_dev;
+ enum hsr_version hsr_prp_version;
u8 br_members;
u8 hsr_members;
struct notifier_block prueth_netdevice_nb;
@@ -343,6 +352,10 @@ struct prueth {
spinlock_t vtbl_lock;
/** @stats_lock: Lock for reading icssg stats */
spinlock_t stats_lock;
+ struct icssg_firmwares icssg_emac_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_switch_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_hsr_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_prp_firmwares[PRUETH_NUM_MACS];
};
struct emac_tx_ts_response {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index ff5f41bf499e..5e225310c9de 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -367,7 +367,7 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
return IRQ_NONE;
prueth_tx_ts_sr1(emac, (void *)page_address(page));
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
return IRQ_HANDLED;
}
@@ -392,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 490a9cc06fb0..7e053b8af3ec 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -180,6 +180,9 @@
/* Used to notify the FW of the current link speed */
#define PORT_LINK_SPEED_OFFSET 0x00A8
+/* 2k memory pointer reserved for default writes by PRU0*/
+#define DEFAULT_MSMC_Q_OFFSET 0x00AC
+
/* TAS gate mask for windows list0 */
#define TAS_GATE_MASK_LIST0 0x0100
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index e5fc942c28cc..424ec3212128 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -64,4 +64,39 @@ config TXGBE
To compile this driver as a module, choose M here. The module
will be called txgbe.
+config TXGBEVF
+ tristate "Wangxun(R) 10/25/40G Virtual Function Ethernet support"
+ depends on PCI
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBWX
+ select PHYLINK
+ help
+ This driver supports virtual functions for SP1000A, WX1820AL,
+ WX5XXX, WX5XXXAL.
+
+ This driver was formerly named txgbevf.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst>.
+
+ To compile this driver as a module, choose M here. MSI-X interrupt
+ support is required for this driver to work correctly.
+
+config NGBEVF
+ tristate "Wangxun(R) GbE Virtual Function Ethernet support"
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBWX
+ help
+ This driver supports virtual functions for WX1860, WX1860AL.
+
+ This driver was formerly named ngbevf.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst>.
+
+ To compile this driver as a module, choose M here. MSI-X interrupt
+ support is required for this driver to work correctly.
+
endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index ca19311dbe38..0a71a710b717 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -5,4 +5,6 @@
obj-$(CONFIG_LIBWX) += libwx/
obj-$(CONFIG_TXGBE) += txgbe/
+obj-$(CONFIG_TXGBEVF) += txgbevf/
obj-$(CONFIG_NGBE) += ngbe/
+obj-$(CONFIG_NGBEVF) += ngbevf/
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 9b78b604a94e..a71b0ad77de3 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o
+libwx-objs += wx_vf.o wx_vf_lib.o wx_vf_common.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 0f4be72116b8..bcd07a715752 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -11,6 +11,7 @@
#include "wx_type.h"
#include "wx_lib.h"
#include "wx_sriov.h"
+#include "wx_vf.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -124,6 +125,11 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
{
u32 mask;
+ if (wx->pdev->is_virtfn) {
+ wr32(wx, WX_VXIMC, qmask);
+ return;
+ }
+
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
@@ -1107,7 +1113,7 @@ static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
+u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
u32 vector = 0;
@@ -1827,7 +1833,7 @@ void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
}
EXPORT_SYMBOL(wx_disable_rx_queue);
-static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
+void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
u8 reg_idx = ring->reg_idx;
u32 rxdctl;
@@ -1843,6 +1849,7 @@ static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
reg_idx);
}
}
+EXPORT_SYMBOL(wx_enable_rx_queue);
static void wx_configure_srrctl(struct wx *wx,
struct wx_ring *rx_ring)
@@ -1912,7 +1919,6 @@ static void wx_configure_rx_ring(struct wx *wx,
struct wx_ring *ring)
{
u16 reg_idx = ring->reg_idx;
- union wx_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
@@ -1942,9 +1948,9 @@ static void wx_configure_rx_ring(struct wx *wx,
memset(ring->rx_buffer_info, 0,
sizeof(struct wx_rx_buffer) * ring->count);
- /* initialize Rx descriptor 0 */
- rx_desc = WX_RX_DESC(ring, 0);
- rx_desc->wb.upper.length = 0;
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
/* enable receive descriptor ring */
wr32m(wx, WX_PX_RR_CFG(reg_idx),
@@ -2368,7 +2374,8 @@ int wx_sw_init(struct wx *wx)
wx->bus.device = PCI_SLOT(pdev->devfn);
wx->bus.func = PCI_FUNC(pdev->devfn);
- if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+ if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
+ pdev->is_virtfn) {
wx->subsystem_vendor_id = pdev->subsystem_vendor;
wx->subsystem_device_id = pdev->subsystem_device;
} else {
@@ -2778,6 +2785,8 @@ void wx_update_stats(struct wx *wx)
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
+	/* qmprc is not cleared on read, manually reset it */
+ hwstats->qmprc = 0;
for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 26a56cba60b9..2393a743b564 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -29,6 +29,7 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
+u32 wx_mta_vector(struct wx *wx, u8 *mc_addr);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
@@ -37,6 +38,7 @@ void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 7f2e6cddfeb1..723785ef87bb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -174,10 +174,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
-
- /* If the page was released, just unmap it. */
- if (unlikely(WX_CB(skb)->page_released))
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
}
static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
@@ -227,10 +223,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
- /* the page has been released from the ring */
- WX_CB(skb)->page_released = true;
-
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
rx_buffer->skb = NULL;
@@ -315,7 +307,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return false;
dma = page_pool_get_dma_addr(page);
- bi->page_dma = dma;
+ bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
@@ -352,7 +344,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
+ cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -365,6 +357,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
/* clear the status bits for the next_to_use descriptor */
rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -1705,6 +1699,7 @@ static void wx_set_rss_queues(struct wx *wx)
clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ wx->ring_feature[RING_F_FDIR].indices = 1;
/* Use Flow Director in addition to RSS to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
@@ -1746,7 +1741,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = { .pre_vectors = 1 };
+ struct irq_affinity affd = { .post_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
@@ -1783,16 +1778,24 @@ static int wx_acquire_msix_vectors(struct wx *wx)
return nvecs;
}
- wx->msix_entry->entry = 0;
- wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
nvecs -= 1;
for (i = 0; i < nvecs; i++) {
wx->msix_q_entries[i].entry = i;
- wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
+ wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
}
wx->num_q_vectors = nvecs;
+ wx->msix_entry->entry = nvecs;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
+
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags)) {
+ wx->msix_entry->entry = 0;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ wx->msix_q_entries[0].entry = 0;
+ wx->msix_q_entries[0].vector = pci_irq_vector(wx->pdev, 1);
+ }
+
return 0;
}
@@ -1810,7 +1813,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
/* We will try to get MSI-X interrupts first */
ret = wx_acquire_msix_vectors(wx);
- if (ret == 0 || (ret == -ENOMEM))
+ if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn)
return ret;
/* Disable VMDq support */
@@ -2161,7 +2164,12 @@ int wx_init_interrupt_scheme(struct wx *wx)
int ret;
/* Number of supported queues */
- wx_set_num_queues(wx);
+ if (wx->pdev->is_virtfn) {
+ if (wx->set_num_queues)
+ wx->set_num_queues(wx);
+ } else {
+ wx_set_num_queues(wx);
+ }
/* Set interrupt mode */
ret = wx_set_interrupt_capability(wx);
@@ -2291,6 +2299,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
if (direction == -1) {
/* other causes */
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+ msix_vector = 0;
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = 0;
ivar = rd32(wx, WX_PX_MISC_IVAR);
@@ -2299,8 +2309,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
- msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2339,7 +2347,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg |= WX_PX_ITR_CNT_WDIS;
- wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
}
/**
@@ -2392,9 +2400,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
- wx_set_ivar(wx, -1, 0, 0);
+ wx_set_ivar(wx, -1, 0, v_idx);
if (pdev->msix_enabled)
- wr32(wx, WX_PX_ITR(0), 1950);
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@@ -2414,9 +2422,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
- if (WX_CB(skb)->page_released)
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
dev_kfree_skb(skb);
}
@@ -2440,6 +2445,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
}
}
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2623,7 +2631,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
struct page_pool_params pp_params = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
- .pool_size = rx_ring->size,
+ .pool_size = rx_ring->count,
.nid = dev_to_node(rx_ring->dev),
.dev = rx_ring->dev,
.dma_dir = DMA_FROM_DEVICE,
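
For clarity, a standalone model (not driver code) of the vector layout established by wx_acquire_msix_vectors() after this change: queue vectors now occupy IRQ vectors 0..nvecs-1 and the misc vector moves to vector nvecs, except in the shared case (EM MAC with 7 VFs) where misc keeps vector 0 and the single queue vector uses vector 1, with both entry fields set to 0.

#include <stdio.h>

/* Model only: prints which IRQ vector each cause ends up on */
static void wx_model_vector_layout(int nvecs, int shared)
{
	int i;

	if (shared) {
		printf("misc    -> irq vector 0 (entry 0)\n");
		printf("queue 0 -> irq vector 1 (entry 0)\n");
		return;
	}

	for (i = 0; i < nvecs; i++)
		printf("queue %d -> irq vector %d\n", i, i);
	printf("misc    -> irq vector %d\n", nvecs);
}

int main(void)
{
	wx_model_vector_layout(4, 0);	/* typical case */
	wx_model_vector_layout(1, 1);	/* EM MAC with 7 VFs: shared vectors */
	return 0;
}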
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
index 73af5f11c3bd..2aa03eadf064 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -174,3 +174,246 @@ int wx_check_for_rst_pf(struct wx *wx, u16 vf)
return 0;
}
+
+static u32 wx_read_v2p_mailbox(struct wx *wx)
+{
+ u32 mailbox = rd32(wx, WX_VXMAILBOX);
+
+ mailbox |= wx->mbx.mailbox;
+ wx->mbx.mailbox |= mailbox & WX_VXMAILBOX_R2C_BITS;
+
+ return mailbox;
+}
+
+static u32 wx_mailbox_get_lock_vf(struct wx *wx)
+{
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_VFU);
+ return wx_read_v2p_mailbox(wx);
+}
+
+/**
+ * wx_obtain_mbx_lock_vf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_vf(struct wx *wx)
+{
+ int count = 5, ret;
+ u32 mailbox;
+
+ ret = readx_poll_timeout_atomic(wx_mailbox_get_lock_vf, wx, mailbox,
+ (mailbox & WX_VXMAILBOX_VFU),
+ 1, count);
+ if (ret)
+ wx_err(wx, "Failed to obtain mailbox lock for VF.\n");
+
+ return ret;
+}
+
+static int wx_check_for_bit_vf(struct wx *wx, u32 mask)
+{
+ u32 mailbox = wx_read_v2p_mailbox(wx);
+
+ wx->mbx.mailbox &= ~mask;
+
+ return (mailbox & mask ? 0 : -EBUSY);
+}
+
+/**
+ * wx_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the PF has set the status bit or else -EBUSY
+ **/
+static int wx_check_for_ack_vf(struct wx *wx)
+{
+ /* read clear the pf ack bit */
+ return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFACK);
+}
+
+/**
+ * wx_check_for_msg_vf - checks to see if the PF has sent mail
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the PF has written a message or else -EBUSY
+ **/
+int wx_check_for_msg_vf(struct wx *wx)
+{
+ /* read clear the pf sts bit */
+ return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFSTS);
+}
+
+/**
+ * wx_check_for_rst_vf - checks to see if the PF has reset
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the PF has set a reset bit, -EBUSY otherwise
+ **/
+int wx_check_for_rst_vf(struct wx *wx)
+{
+ /* read clear the pf reset done bit */
+ return wx_check_for_bit_vf(wx,
+ WX_VXMAILBOX_RSTD |
+ WX_VXMAILBOX_RSTI);
+}
+
+/**
+ * wx_poll_for_msg - Wait for message notification
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the VF has successfully received a message notification
+ **/
+static int wx_poll_for_msg(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 val;
+
+ return readx_poll_timeout_atomic(wx_check_for_msg_vf, wx, val,
+ (val == 0), mbx->udelay, mbx->timeout);
+}
+
+/**
+ * wx_poll_for_ack - Wait for message acknowledgment
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the VF has successfully received a message ack
+ **/
+static int wx_poll_for_ack(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 val;
+
+ return readx_poll_timeout_atomic(wx_check_for_ack_vf, wx, val,
+ (val == 0), mbx->udelay, mbx->timeout);
+}
+
+/**
+ * wx_read_posted_mbx - Wait for message notification and receive message
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size)
+{
+ int ret;
+
+ ret = wx_poll_for_msg(wx);
+	/* if a message was received, read it; otherwise we timed out */
+ if (ret)
+ return ret;
+
+ return wx_read_mbx_vf(wx, msg, size);
+}
+
+/**
+ * wx_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size)
+{
+ int ret;
+
+ /* send msg */
+ ret = wx_write_mbx_vf(wx, msg, size);
+ /* if msg sent wait until we receive an ack */
+ if (ret)
+ return ret;
+
+ return wx_poll_for_ack(wx);
+}
+
+/**
+ * wx_write_mbx_vf - Write a message to the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer
+ **/
+int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+		wx_err(wx, "Invalid mailbox message size %d\n", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_vf(wx);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_vf(wx);
+ wx_check_for_ack_vf(wx);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_VXMBMEM, i, msg[i]);
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_REQ);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer
+ **/
+int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* limit read to size of mailbox and mbx->size is up to 15 */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_vf(wx);
+ if (ret)
+ return ret;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_VXMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_ACK);
+
+ return 0;
+}
+
+int wx_init_mbx_params_vf(struct wx *wx)
+{
+ wx->vfinfo = kzalloc(sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ /* Initialize mailbox parameters */
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->mbx.mailbox = WX_VXMAILBOX;
+ wx->mbx.udelay = 10;
+ wx->mbx.timeout = 1000;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_init_mbx_params_vf);
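
A minimal caller sketch, not part of the patch, assuming a struct wx whose mailbox parameters were initialized with wx_init_mbx_params_vf(); the function name is hypothetical, and the same pattern appears as wx_mbx_write_and_read_reply() in wx_vf.c:

static int wxvf_mbx_roundtrip_example(struct wx *wx, u32 *req, u32 *resp,
				      u16 len)
{
	int ret;

	/* write the request and poll until the PF acks it */
	ret = wx_write_posted_mbx(wx, req, len);
	if (ret)
		return ret;

	/* poll for the PF's reply and copy it out of the mailbox memory */
	return wx_read_posted_mbx(wx, resp, len);
}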
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
index 05aae138dbc3..82df9218490a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -11,6 +11,20 @@
#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+/* VF Registers */
+#define WX_VXMAILBOX 0x600
+#define WX_VXMAILBOX_REQ BIT(0) /* Request for PF Ready bit */
+#define WX_VXMAILBOX_ACK BIT(1) /* Ack PF message received */
+#define WX_VXMAILBOX_VFU BIT(2) /* VF owns the mailbox buffer */
+#define WX_VXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+#define WX_VXMAILBOX_PFSTS BIT(4) /* PF wrote a message in the MB */
+#define WX_VXMAILBOX_PFACK BIT(5) /* PF ack the previous VF msg */
+#define WX_VXMAILBOX_RSTI BIT(6) /* PF has reset indication */
+#define WX_VXMAILBOX_RSTD BIT(7) /* PF has indicated reset done */
+#define WX_VXMAILBOX_R2C_BITS (WX_VXMAILBOX_RSTD | \
+ WX_VXMAILBOX_PFSTS | WX_VXMAILBOX_PFACK)
+
+#define WX_VXMBMEM 0x00C00 /* 16*4B */
#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
@@ -74,4 +88,12 @@ int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size);
+int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size);
+int wx_check_for_rst_vf(struct wx *wx);
+int wx_check_for_msg_vf(struct wx *wx);
+int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size);
+int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size);
+int wx_init_mbx_params_vf(struct wx *wx);
+
#endif /* _WX_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
index 2c39b879f977..44f3e6505246 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -652,7 +652,7 @@ static int wx_ptp_set_timestamp_mode(struct wx *wx,
return 0;
}
-static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
+static u64 wx_ptp_read(struct cyclecounter *hw_cc)
{
struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index e8656d9d733b..c82ae137756c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -64,6 +64,7 @@ static void wx_sriov_clear_data(struct wx *wx)
wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
wx->ring_feature[RING_F_VMDQ].offset = 0;
+ clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
/* Disable VMDq flag so device will be set in NM mode */
if (wx->ring_feature[RING_F_VMDQ].limit == 1)
@@ -78,6 +79,9 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
+ if (num_vfs == 7 && wx->mac.type == wx_mac_em)
+ set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+
/* Enable VMDq flag so device will be set in VM mode */
set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
if (!wx->ring_feature[RING_F_VMDQ].limit)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 7730c9fc3e02..9d5d10f9e410 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -825,6 +825,11 @@ struct wx_bus_info {
struct wx_mbx_info {
u16 size;
+ u32 mailbox;
+ u32 udelay;
+ u32 timeout;
+ /* lock mbx access */
+ spinlock_t mbx_lock;
};
struct wx_thermal_sensor_data {
@@ -909,7 +914,6 @@ enum wx_reset_type {
struct wx_cb {
dma_addr_t dma;
u16 append_cnt; /* number of skb's appended */
- bool page_released;
bool dma_released;
};
@@ -998,7 +1002,6 @@ struct wx_tx_buffer {
struct wx_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
};
@@ -1191,6 +1194,7 @@ enum wx_pf_flags {
WX_FLAG_VMDQ_ENABLED,
WX_FLAG_VLAN_PROMISC,
WX_FLAG_SRIOV_ENABLED,
+ WX_FLAG_IRQ_VECTOR_SHARED,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
@@ -1200,6 +1204,8 @@ enum wx_pf_flags {
WX_FLAG_PTP_PPS_ENABLED,
WX_FLAG_NEED_LINK_CONFIG,
WX_FLAG_NEED_SFP_RESET,
+ WX_FLAG_NEED_UPDATE_LINK,
+ WX_FLAG_NEED_DO_RESET,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1210,6 +1216,7 @@ struct wx {
void *priv;
u8 __iomem *hw_addr;
+ u8 __iomem *b4_addr; /* vf only */
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
@@ -1284,6 +1291,8 @@ struct wx {
u32 *isb_mem;
u32 isb_tag[WX_ISB_MAX];
bool misc_irq_domain;
+ u32 eims_other;
+ u32 eims_enable_mask;
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
@@ -1315,6 +1324,7 @@ struct wx {
int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
int (*ptp_setup_sdp)(struct wx *wx);
+ void (*set_num_queues)(struct wx *wx);
bool pps_enabled;
u64 pps_width;
@@ -1343,7 +1353,7 @@ struct wx {
};
#define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT((i) + 1)
+#define WX_INTR_Q(i) BIT((i))
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.c b/drivers/net/ethernet/wangxun/libwx/wx_vf.c
new file mode 100644
index 000000000000..7567216a005f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_vf.h"
+
+static void wx_virt_clr_reg(struct wx *wx)
+{
+ u32 vfsrrctl, i;
+
+	/* VFSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
+ vfsrrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
+
+ /* clear all rxd ctl */
+ for (i = 0; i < WX_VF_MAX_RING_NUMS; i++)
+ wr32m(wx, WX_VXRXDCTL(i),
+ WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK,
+ vfsrrctl);
+
+ rd32(wx, WX_VXSTATUS);
+}
+
+/**
+ * wx_init_hw_vf - virtual function hardware initialization
+ * @wx: pointer to hardware structure
+ *
+ * Initialize the mac address
+ **/
+void wx_init_hw_vf(struct wx *wx)
+{
+ wx_get_mac_addr_vf(wx, wx->mac.addr);
+}
+EXPORT_SYMBOL(wx_init_hw_vf);
+
+static int wx_mbx_write_and_read_reply(struct wx *wx, u32 *req_buf,
+ u32 *resp_buf, u16 size)
+{
+ int ret;
+
+ ret = wx_write_posted_mbx(wx, req_buf, size);
+ if (ret)
+ return ret;
+
+ return wx_read_posted_mbx(wx, resp_buf, size);
+}
+
+/**
+ * wx_reset_hw_vf - Performs hardware reset
+ * @wx: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_reset_hw_vf(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 msgbuf[4] = {WX_VF_RESET};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 b4_buf[16] = {0};
+ u32 timeout = 200;
+ int ret;
+ u32 i;
+
+ /* Call wx stop to disable tx/rx and clear interrupts */
+ wx_stop_adapter_vf(wx);
+
+ /* reset the api version */
+ wx->vfinfo->vf_api = wx_mbox_api_null;
+
+ /* backup msix vectors */
+ if (wx->b4_addr) {
+ for (i = 0; i < 16; i++)
+ b4_buf[i] = readl(wx->b4_addr + i * 4);
+ }
+
+ wr32m(wx, WX_VXCTRL, WX_VXCTRL_RST, WX_VXCTRL_RST);
+ rd32(wx, WX_VXSTATUS);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!wx_check_for_rst_vf(wx) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ /* restore msix vectors */
+ if (wx->b4_addr) {
+ for (i = 0; i < 16; i++)
+ writel(b4_buf[i], wx->b4_addr + i * 4);
+ }
+
+ /* amlite: bme */
+ if (wx->mac.type == wx_mac_aml || wx->mac.type == wx_mac_aml40)
+ wr32(wx, WX_VX_PF_BME, WX_VF_BME_ENABLE);
+
+ if (!timeout)
+ return -EBUSY;
+
+ /* Reset VF registers to initial values */
+ wx_virt_clr_reg(wx);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = 2000;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ if (msgbuf[0] == (WX_VF_RESET | WX_VT_MSGTYPE_ACK))
+ ether_addr_copy(wx->mac.perm_addr, addr);
+
+ wx->mac.mc_filter_type = msgbuf[3];
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_reset_hw_vf);
+
+/**
+ * wx_stop_adapter_vf - Generic stop Tx/Rx units
+ * @wx: pointer to hardware structure
+ *
+ * Clears interrupts, disables transmit and receive units.
+ **/
+void wx_stop_adapter_vf(struct wx *wx)
+{
+ u32 reg_val;
+ u16 i;
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ wr32(wx, WX_VXIMS, WX_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ wr32(wx, WX_VXICR, U32_MAX);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < wx->mac.max_tx_queues; i++)
+ wr32(wx, WX_VXTXDCTL(i), WX_VXTXDCTL_FLUSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < wx->mac.max_rx_queues; i++) {
+ reg_val = rd32(wx, WX_VXRXDCTL(i));
+ reg_val &= ~WX_VXRXDCTL_ENABLE;
+ wr32(wx, WX_VXRXDCTL(i), reg_val);
+ }
+ /* Clear packet split and pool config */
+ wr32(wx, WX_VXMRQC, 0);
+
+ /* flush all queues disables */
+ rd32(wx, WX_VXSTATUS);
+}
+EXPORT_SYMBOL(wx_stop_adapter_vf);
+
+/**
+ * wx_set_rar_vf - set device MAC address
+ * @wx: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @enable_addr: set flag that address is active
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr)
+{
+ u32 msgbuf[3] = {WX_VF_SET_MAC_ADDR};
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ int ret;
+
+ memcpy(msg_addr, addr, ETH_ALEN);
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (msgbuf[0] == (WX_VF_SET_MAC_ADDR | WX_VT_MSGTYPE_NACK)) {
+ wx_get_mac_addr_vf(wx, wx->mac.addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rar_vf);
+
+/**
+ * wx_update_mc_addr_list_vf - Update Multicast addresses
+ * @wx: pointer to the HW structure
+ * @netdev: pointer to the net device structure
+ *
+ * Updates the Multicast Table Array.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev)
+{
+ u32 msgbuf[WX_VXMAILBOX_SIZE] = {WX_VF_SET_MULTICAST};
+ u16 *vector_l = (u16 *)&msgbuf[1];
+ struct netdev_hw_addr *ha;
+ u32 cnt, i;
+
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 28)
+ cnt = 28;
+ msgbuf[0] |= cnt << WX_VT_MSGINFO_SHIFT;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ if (is_link_local_ether_addr(ha->addr))
+ continue;
+
+ vector_l[i++] = wx_mta_vector(wx, ha->addr);
+ }
+
+ return wx_write_posted_mbx(wx, msgbuf, ARRAY_SIZE(msgbuf));
+}
+EXPORT_SYMBOL(wx_update_mc_addr_list_vf);
+
+/**
+ * wx_update_xcast_mode_vf - Update Multicast mode
+ * @wx: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode)
+{
+ u32 msgbuf[2] = {WX_VF_UPDATE_XCAST_MODE, xcast_mode};
+ int ret = 0;
+
+ if (wx->vfinfo->vf_api < wx_mbox_api_13)
+ return -EINVAL;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (WX_VF_UPDATE_XCAST_MODE | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_update_xcast_mode_vf);
+
+/**
+ * wx_get_link_state_vf - Get VF link state from PF
+ * @wx: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_get_link_state_vf(struct wx *wx, u16 *link_state)
+{
+ u32 msgbuf[2] = {WX_VF_GET_LINK_STATE};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ return -EINVAL;
+
+ *link_state = msgbuf[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_link_state_vf);
+
+/**
+ * wx_set_vfta_vf - Set/Unset vlan filter table address
+ * @wx: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass)
+{
+ u32 msgbuf[2] = {WX_VF_SET_VLAN, vlan};
+ bool vlan_offload = false;
+ int ret;
+
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << WX_VT_MSGINFO_SHIFT;
+	/* if vf vlan offload is disabled, allow creating a vlan under the pf port vlan */
+ msgbuf[0] |= BIT(vlan_offload);
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_ACK)
+ return 0;
+
+ return msgbuf[0] & WX_VT_MSGTYPE_NACK;
+}
+EXPORT_SYMBOL(wx_set_vfta_vf);
+
+void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr)
+{
+ ether_addr_copy(mac_addr, wx->mac.perm_addr);
+}
+EXPORT_SYMBOL(wx_get_mac_addr_vf);
+
+int wx_get_fw_version_vf(struct wx *wx)
+{
+ u32 msgbuf[2] = {WX_VF_GET_FW_VERSION};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ return -EINVAL;
+ snprintf(wx->eeprom_id, 32, "0x%08x", msgbuf[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_fw_version_vf);
+
+int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr)
+{
+ u32 msgbuf[3] = {WX_VF_SET_MACVLAN};
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ int ret;
+
+ /* If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << WX_VT_MSGINFO_SHIFT;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (WX_VF_SET_MACVLAN | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_uc_addr_vf);
+
+/**
+ * wx_rlpml_set_vf - Set the maximum receive packet length
+ * @wx: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_rlpml_set_vf(struct wx *wx, u16 max_size)
+{
+ u32 msgbuf[2] = {WX_VF_SET_LPE, max_size};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ if ((msgbuf[0] & WX_VF_SET_LPE) &&
+ (msgbuf[0] & WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_rlpml_set_vf);
+
+/**
+ * wx_negotiate_api_version - Negotiate supported API version
+ * @wx: pointer to the HW structure
+ * @api: integer containing requested API version
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_negotiate_api_version(struct wx *wx, int api)
+{
+ u32 msgbuf[2] = {WX_VF_API_NEGOTIATE, api};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msgbuf[0] == (WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+ wx->vfinfo->vf_api = api;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_negotiate_api_version);
+
+int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc)
+{
+ u32 msgbuf[5] = {WX_VF_GET_QUEUES};
+ int ret;
+
+ /* do nothing if API doesn't support wx_get_queues */
+ if (wx->vfinfo->vf_api < wx_mbox_api_13)
+ return -EINVAL;
+
+ /* Fetch queue configuration from the PF */
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msgbuf[0] != (WX_VF_GET_QUEUES | WX_VT_MSGTYPE_ACK))
+ return -EINVAL;
+ /* record and validate values from message */
+ wx->mac.max_tx_queues = msgbuf[WX_VF_TX_QUEUES];
+ if (wx->mac.max_tx_queues == 0 ||
+ wx->mac.max_tx_queues > WX_VF_MAX_TX_QUEUES)
+ wx->mac.max_tx_queues = WX_VF_MAX_TX_QUEUES;
+
+ wx->mac.max_rx_queues = msgbuf[WX_VF_RX_QUEUES];
+ if (wx->mac.max_rx_queues == 0 ||
+ wx->mac.max_rx_queues > WX_VF_MAX_RX_QUEUES)
+ wx->mac.max_rx_queues = WX_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msgbuf[WX_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > wx->mac.max_rx_queues)
+ *num_tcs = 1;
+ *default_tc = msgbuf[WX_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= wx->mac.max_tx_queues)
+ *default_tc = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_queues_vf);
+
+static int wx_get_link_status_from_pf(struct wx *wx, u32 *msgbuf)
+{
+ u32 links_reg = msgbuf[1];
+
+ if (msgbuf[1] & WX_PF_NOFITY_VF_NET_NOT_RUNNING)
+ wx->notify_down = true;
+ else
+ wx->notify_down = false;
+
+ if (wx->notify_down) {
+ wx->link = false;
+ wx->speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ wx->link = WX_PFLINK_STATUS(links_reg);
+ wx->speed = WX_PFLINK_SPEED(links_reg);
+
+ return 0;
+}
+
+static int wx_pf_ping_vf(struct wx *wx, u32 *msgbuf)
+{
+ if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS))
+ /* msg is not CTS, we need to do reset */
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct wx_link_reg_fields wx_speed_lookup_vf[] = {
+ {wx_mac_unknown},
+ {wx_mac_sp, SPEED_10000, SPEED_1000, SPEED_100, SPEED_UNKNOWN, SPEED_UNKNOWN},
+ {wx_mac_em, SPEED_1000, SPEED_100, SPEED_10, SPEED_UNKNOWN, SPEED_UNKNOWN},
+ {wx_mac_aml, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
+ {wx_mac_aml40, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
+};
+
+static void wx_check_physical_link(struct wx *wx)
+{
+ u32 val, link_val;
+ int ret;
+
+ /* get link status from hw status reg
+ * for SFP+ modules and DA cables, it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (wx->mac.type == wx_mac_em)
+ ret = read_poll_timeout_atomic(rd32, val, val & GENMASK(4, 1),
+ 100, 500, false, wx, WX_VXSTATUS);
+ else
+ ret = read_poll_timeout_atomic(rd32, val, val & BIT(0), 100,
+ 500, false, wx, WX_VXSTATUS);
+ if (ret) {
+ wx->speed = SPEED_UNKNOWN;
+ wx->link = false;
+ return;
+ }
+
+ wx->link = true;
+ link_val = WX_VXSTATUS_SPEED(val);
+
+ if (link_val & BIT(0))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit0_f;
+ else if (link_val & BIT(1))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit1_f;
+ else if (link_val & BIT(2))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit2_f;
+ else if (link_val & BIT(3))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit3_f;
+ else
+ wx->speed = SPEED_UNKNOWN;
+}
+
+int wx_check_mac_link_vf(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 msgbuf[2] = {0};
+ int ret = 0;
+
+ if (!mbx->timeout)
+ goto out;
+
+ wx_check_for_rst_vf(wx);
+ if (!wx_check_for_msg_vf(wx))
+ ret = wx_read_mbx_vf(wx, msgbuf, 2);
+ if (ret)
+ goto out;
+
+ switch (msgbuf[0] & GENMASK(8, 0)) {
+ case WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG:
+ ret = wx_get_link_status_from_pf(wx, msgbuf);
+ goto out;
+ case WX_PF_CONTROL_MSG:
+ ret = wx_pf_ping_vf(wx, msgbuf);
+ goto out;
+ case 0:
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK) {
+ /* msg is NACK, we must have lost CTS status */
+ ret = -EBUSY;
+ goto out;
+ }
+ /* no message, check link status */
+ wx_check_physical_link(wx);
+ goto out;
+ default:
+ break;
+ }
+
+ if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS)) {
+		/* msg is not CTS and is NACK, we must have lost CTS status */
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+out:
+ return ret;
+}
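
A hypothetical VF bring-up order using the helpers above; the sequence is an assumption inferred from the checks in this file (reset clears vf_api, wx_get_queues_vf() requires at least wx_mbox_api_13) and is not taken from the consuming drivers:

static int wxvf_example_init(struct wx *wx)
{
	u32 num_tcs, default_tc;
	int ret;

	ret = wx_init_mbx_params_vf(wx);	/* mailbox timings + vfinfo */
	if (ret)
		return ret;

	ret = wx_reset_hw_vf(wx);		/* handshake reset with the PF */
	if (ret)
		return ret;

	ret = wx_negotiate_api_version(wx, wx_mbox_api_13);
	if (ret)
		return ret;

	ret = wx_get_queues_vf(wx, &num_tcs, &default_tc);
	if (ret)
		return ret;

	wx_init_hw_vf(wx);			/* copy the PF-assigned MAC */
	return 0;
}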
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
new file mode 100644
index 000000000000..fec1126703e3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_H_
+#define _WX_VF_H_
+
+#define WX_VF_MAX_RING_NUMS 8
+#define WX_VX_PF_BME 0x4B8
+#define WX_VF_BME_ENABLE BIT(0)
+#define WX_VXSTATUS 0x4
+#define WX_VXCTRL 0x8
+#define WX_VXCTRL_RST BIT(0)
+
+#define WX_VXMRQC 0x78
+#define WX_VXICR 0x100
+#define WX_VXIMS 0x108
+#define WX_VXIMC 0x10C
+#define WX_VF_IRQ_CLEAR_MASK 7
+#define WX_VF_MAX_TX_QUEUES 4
+#define WX_VF_MAX_RX_QUEUES 4
+#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
+#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
+#define WX_VXRXDCTL_ENABLE BIT(0)
+#define WX_VXTXDCTL_FLUSH BIT(26)
+
+#define WX_VXITR(i) (0x200 + (4 * (i))) /* i=[0,1] */
+#define WX_VXITR_MASK GENMASK(8, 0)
+#define WX_VXITR_CNT_WDIS BIT(31)
+#define WX_VXIVAR_MISC 0x260
+#define WX_VXIVAR(i) (0x240 + (4 * (i))) /* i=[0,3] */
+
+#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
+#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
+
+#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
+#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
+#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
+#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
+
+#define wx_conf_size(v, mwidth, uwidth) ({ \
+ typeof(v) _v = (v); \
+ (_v == 2 << (mwidth) ? 0 : _v >> (uwidth)); \
+})
+#define wx_buf_len(v) wx_conf_size(v, 13, 7)
+#define wx_hdr_sz(v) wx_conf_size(v, 10, 6)
+#define wx_buf_sz(v) wx_conf_size(v, 14, 10)
+#define wx_pkt_thresh(v) wx_conf_size(v, 4, 0)
+
+#define WX_RX_HDR_SIZE 256
+#define WX_RX_BUF_SIZE 2048
+
+#define WX_RXBUFFER_2048 (2048)
+#define WX_RXBUFFER_3072 3072
+
+/* Receive Path */
+#define WX_VXRDBAL(r) (0x1000 + (0x40 * (r)))
+#define WX_VXRDBAH(r) (0x1004 + (0x40 * (r)))
+#define WX_VXRDT(r) (0x1008 + (0x40 * (r)))
+#define WX_VXRDH(r) (0x100C + (0x40 * (r)))
+
+#define WX_VXRXDCTL_RSCEN BIT(29)
+#define WX_VXRXDCTL_DROP BIT(30)
+#define WX_VXRXDCTL_VLAN BIT(31)
+
+#define WX_VXTDBAL(r) (0x3000 + (0x40 * (r)))
+#define WX_VXTDBAH(r) (0x3004 + (0x40 * (r)))
+#define WX_VXTDT(r) (0x3008 + (0x40 * (r)))
+#define WX_VXTDH(r) (0x300C + (0x40 * (r)))
+
+#define WX_VXTXDCTL_ENABLE BIT(0)
+#define WX_VXTXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXTXDCTL_PTHRESH(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXTXDCTL_WTHRESH(f) FIELD_PREP(GENMASK(22, 16), f)
+
+#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
+#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
+#define WX_VXMRQC_PSR_L4HDR BIT(0)
+#define WX_VXMRQC_PSR_L3HDR BIT(1)
+#define WX_VXMRQC_PSR_L2HDR BIT(2)
+#define WX_VXMRQC_PSR_TUNHDR BIT(3)
+#define WX_VXMRQC_PSR_TUNMAC BIT(4)
+
+#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
+#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
+
+#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
+#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
+#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
+#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
+#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
+#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
+#define WX_VXMRQC_RSS_EN BIT(8)
+#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+
+#define WX_PFLINK_STATUS(g) FIELD_GET(BIT(0), g)
+#define WX_PFLINK_SPEED(g) FIELD_GET(GENMASK(31, 1), g)
+#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
+
+struct wx_link_reg_fields {
+ u32 mac_type;
+ u32 bit0_f;
+ u32 bit1_f;
+ u32 bit2_f;
+ u32 bit3_f;
+ u32 bit4_f;
+};
+
+void wx_init_hw_vf(struct wx *wx);
+int wx_reset_hw_vf(struct wx *wx);
+void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr);
+void wx_stop_adapter_vf(struct wx *wx);
+int wx_get_fw_version_vf(struct wx *wx);
+int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr);
+int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev);
+int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr);
+int wx_rlpml_set_vf(struct wx *wx, u16 max_size);
+int wx_negotiate_api_version(struct wx *wx, int api);
+int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc);
+int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode);
+int wx_get_link_state_vf(struct wx *wx, u16 *link_state);
+int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass);
+int wx_check_mac_link_vf(struct wx *wx);
+
+#endif /* _WX_VF_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
new file mode 100644
index 000000000000..ade2bfe563aa
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_mbx.h"
+#include "wx_lib.h"
+#include "wx_vf.h"
+#include "wx_vf_lib.h"
+#include "wx_vf_common.h"
+
+int wxvf_suspend(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ netif_device_detach(wx->netdev);
+ wx_clear_interrupt_scheme(wx);
+ pci_disable_device(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_suspend);
+
+void wxvf_shutdown(struct pci_dev *pdev)
+{
+ wxvf_suspend(&pdev->dev);
+}
+EXPORT_SYMBOL(wxvf_shutdown);
+
+int wxvf_resume(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ pci_set_master(pdev);
+ wx_init_interrupt_scheme(wx);
+ netif_device_attach(wx->netdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_resume);
+
+void wxvf_remove(struct pci_dev *pdev)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ cancel_work_sync(&wx->service_task);
+ netdev = wx->netdev;
+ unregister_netdev(netdev);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(wxvf_remove);
+
+static irqreturn_t wx_msix_misc_vf(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+	/* unmask the misc interrupt */
+ if (netif_running(wx->netdev))
+ wr32(wx, WX_VXIMC, wx->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+int wx_request_msix_irqs_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
+ NULL, IRQF_ONESHOT, netdev->name, wx);
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+EXPORT_SYMBOL(wx_request_msix_irqs_vf);
+
+void wx_negotiate_api_vf(struct wx *wx)
+{
+ int api[] = {
+ wx_mbox_api_13,
+ wx_mbox_api_null};
+ int err = 0, idx = 0;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
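+	/* try each supported API version in order until the PF accepts one */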
+ while (api[idx] != wx_mbox_api_null) {
+ err = wx_negotiate_api_version(wx, api[idx]);
+ if (!err)
+ break;
+ idx++;
+ }
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+}
+EXPORT_SYMBOL(wx_negotiate_api_vf);
+
+void wx_reset_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int ret = 0;
+
+ ret = wx_reset_hw_vf(wx);
+ if (!ret)
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_valid_ether_addr(wx->mac.addr)) {
+ eth_hw_addr_set(netdev, wx->mac.addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+ }
+}
+EXPORT_SYMBOL(wx_reset_vf);
+
+void wx_set_rx_mode_vf(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ unsigned int flags = netdev->flags;
+ int xcast_mode;
+
+ /* request the most inclusive mode we need */
+ if (flags & IFF_PROMISC)
+ xcast_mode = WXVF_XCAST_MODE_PROMISC;
+ else if (flags & IFF_ALLMULTI)
+ xcast_mode = WXVF_XCAST_MODE_ALLMULTI;
+ else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
+ xcast_mode = WXVF_XCAST_MODE_MULTI;
+ else
+ xcast_mode = WXVF_XCAST_MODE_NONE;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ wx_update_xcast_mode_vf(wx, xcast_mode);
+ wx_update_mc_addr_list_vf(wx, netdev);
+ wx_write_uc_addr_list_vf(netdev);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+}
+EXPORT_SYMBOL(wx_set_rx_mode_vf);
+
+/**
+ * wx_configure_rx_vf - Configure Receive Unit after Reset
+ * @wx: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void wx_configure_rx_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int i, ret;
+
+ wx_setup_psrtype_vf(wx);
+ wx_setup_vfmrqc_vf(wx);
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ ret = wx_rlpml_set_vf(wx,
+ netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+ if (ret)
+ wx_dbg(wx, "Failed to set MTU at %d\n", netdev->mtu);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *rx_ring = wx->rx_ring[i];
+
+ wx_configure_rx_ring_vf(wx, rx_ring);
+ }
+}
+
+void wx_configure_vf(struct wx *wx)
+{
+ wx_set_rx_mode_vf(wx->netdev);
+ wx_configure_tx_vf(wx);
+ wx_configure_rx_vf(wx);
+}
+EXPORT_SYMBOL(wx_configure_vf);
+
+int wx_set_mac_vf(struct net_device *netdev, void *p)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ ret = wx_set_rar_vf(wx, 1, (u8 *)addr->sa_data, 1);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+
+ if (ret)
+ return -EPERM;
+
+ memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(wx->mac.perm_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_mac_vf);
+
+void wxvf_watchdog_update_link(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags))
+ return;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ err = wx_check_mac_link_vf(wx);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+ if (err) {
+ wx->link = false;
+ set_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
+ }
+ clear_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+}
+EXPORT_SYMBOL(wxvf_watchdog_update_link);
+
+static void wxvf_irq_enable(struct wx *wx)
+{
+ wr32(wx, WX_VXIMC, wx->eims_enable_mask);
+}
+
+static void wxvf_up_complete(struct wx *wx)
+{
+ /* Always set the carrier off */
+ netif_carrier_off(wx->netdev);
+ mod_timer(&wx->service_timer, jiffies + HZ);
+ set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+
+ wx_configure_msix_vf(wx);
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ wr32(wx, WX_VXICR, U32_MAX);
+ wxvf_irq_enable(wx);
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+}
+
+int wxvf_open(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ err = wx_setup_resources(wx);
+ if (err)
+ goto err_reset;
+ wx_configure_vf(wx);
+
+ err = wx_request_msix_irqs_vf(wx);
+ if (err)
+ goto err_free_resources;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_free_irq;
+
+ wxvf_up_complete(wx);
+
+ return 0;
+err_free_irq:
+ wx_free_irq(wx);
+err_free_resources:
+ wx_free_resources(wx);
+err_reset:
+ wx_reset_vf(wx);
+ return err;
+}
+EXPORT_SYMBOL(wxvf_open);
+
+static void wxvf_down(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+
+ timer_delete_sync(&wx->service_timer);
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ netif_carrier_off(netdev);
+ wx_napi_disable_all(wx);
+ wx_reset_vf(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
+}
+
+static void wxvf_reinit_locked(struct wx *wx)
+{
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state))
+ usleep_range(1000, 2000);
+ wxvf_down(wx);
+ wx_free_irq(wx);
+ wx_configure_vf(wx);
+ wx_request_msix_irqs_vf(wx);
+ wxvf_up_complete(wx);
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+static void wxvf_reset_subtask(struct wx *wx)
+{
+ if (!test_bit(WX_FLAG_NEED_DO_RESET, wx->flags))
+ return;
+ clear_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
+
+ rtnl_lock();
+ if (test_bit(WX_STATE_RESETTING, wx->state) ||
+ !(netif_running(wx->netdev))) {
+ rtnl_unlock();
+ return;
+ }
+ wxvf_reinit_locked(wx);
+ rtnl_unlock();
+}
+
+int wxvf_close(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ wxvf_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_close);
+
+static void wxvf_link_config_subtask(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+
+ wxvf_watchdog_update_link(wx);
+ if (wx->link) {
+ if (netif_carrier_ok(netdev))
+ return;
+ netif_carrier_on(netdev);
+ netdev_info(netdev, "Link is Up - %s\n",
+ phy_speed_to_str(wx->speed));
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+ netif_carrier_off(netdev);
+ netdev_info(netdev, "Link is Down\n");
+ }
+}
+
+static void wxvf_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ wxvf_link_config_subtask(wx);
+ wxvf_reset_subtask(wx);
+ wx_service_event_complete(wx);
+}
+
+void wxvf_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, wxvf_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wxvf_init_service);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
new file mode 100644
index 000000000000..cbbb1b178cb2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_COMMON_H_
+#define _WX_VF_COMMON_H_
+
+int wxvf_suspend(struct device *dev_d);
+void wxvf_shutdown(struct pci_dev *pdev);
+int wxvf_resume(struct device *dev_d);
+void wxvf_remove(struct pci_dev *pdev);
+int wx_request_msix_irqs_vf(struct wx *wx);
+void wx_negotiate_api_vf(struct wx *wx);
+void wx_reset_vf(struct wx *wx);
+void wx_set_rx_mode_vf(struct net_device *netdev);
+void wx_configure_vf(struct wx *wx);
+int wx_set_mac_vf(struct net_device *netdev, void *p);
+void wxvf_watchdog_update_link(struct wx *wx);
+int wxvf_open(struct net_device *netdev);
+int wxvf_close(struct net_device *netdev);
+void wxvf_init_service(struct wx *wx);
+
+#endif /* _WX_VF_COMMON_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
new file mode 100644
index 000000000000..5d48df7a849f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_lib.h"
+#include "wx_vf.h"
+#include "wx_vf_lib.h"
+
+static void wx_write_eitr_vf(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg;
+
+ itr_reg = q_vector->itr & WX_VXITR_MASK;
+
+ /* set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= WX_VXITR_CNT_WDIS;
+
+ wr32(wx, WX_VXITR(v_idx), itr_reg);
+}
+
+static void wx_set_ivar_vf(struct wx *wx, s8 direction, u8 queue,
+ u8 msix_vector)
+{
+ u32 ivar, index;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ ivar = rd32(wx, WX_VXIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ wr32(wx, WX_VXIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
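+		/* each VXIVAR register maps two queues: the even queue uses
+		 * bits [15:0] and the odd queue bits [31:16], with the Rx
+		 * cause in the low byte and the Tx cause in the high byte
+		 */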
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = rd32(wx, WX_VXIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_VXIVAR(queue >> 1), ivar);
+ }
+}
+
+void wx_configure_msix_vf(struct wx *wx)
+{
+ int v_idx;
+
+ wx->eims_enable_mask = 0;
+ for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx_set_ivar_vf(wx, 0, ring->reg_idx, v_idx);
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx_set_ivar_vf(wx, 1, ring->reg_idx, v_idx);
+
+ /* add q_vector eims value to global eims_enable_mask */
+ wx->eims_enable_mask |= BIT(v_idx);
+ wx_write_eitr_vf(q_vector);
+ }
+
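+	/* v_idx is now num_q_vectors, so the misc/other cause is mapped to
+	 * the vector just past the queue vectors
+	 */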
+ wx_set_ivar_vf(wx, -1, 1, v_idx);
+
+ /* setup eims_other and add value to global eims_enable_mask */
+ wx->eims_other = BIT(v_idx);
+ wx->eims_enable_mask |= wx->eims_other;
+}
+
+int wx_write_uc_addr_list_vf(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int count = 0;
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
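+		/* slot 0 is used below to request clearing all entries, so
+		 * filter addresses are written starting at index 1
+		 */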
+ netdev_for_each_uc_addr(ha, netdev)
+ wx_set_uc_addr_vf(wx, ++count, ha->addr);
+ } else {
+		/* If the list is empty then send message to PF driver to
+		 * clear all macvlans on this VF.
+		 */
+ wx_set_uc_addr_vf(wx, 0, NULL);
+ }
+
+ return count;
+}
+
+/**
+ * wx_configure_tx_ring_vf - Configure Tx ring after Reset
+ * @wx: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ u32 txdctl = 0;
+ int ret;
+
+ /* disable queue to avoid issues while updating state */
+ wr32(wx, WX_VXTXDCTL(reg_idx), WX_VXTXDCTL_FLUSH);
+ wr32(wx, WX_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXTDBAH(reg_idx), tdba >> 32);
+
+ /* enable relaxed ordering */
+ pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
+ 0, PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_VXTDH(reg_idx), 0);
+ wr32(wx, WX_VXTDT(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_VXTDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
+ txdctl |= WX_VXTXDCTL_ENABLE;
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct wx_tx_buffer) * ring->count);
+
+ wr32(wx, WX_VXTXDCTL(reg_idx), txdctl);
+ /* poll to verify queue is enabled */
+ ret = read_poll_timeout(rd32, txdctl, txdctl & WX_VXTXDCTL_ENABLE,
+ 1000, 10000, true, wx, WX_VXTXDCTL(reg_idx));
+ if (ret == -ETIMEDOUT)
+ wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * wx_configure_tx_vf - Configure Transmit Unit after Reset
+ * @wx: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+void wx_configure_tx_vf(struct wx *wx)
+{
+ u32 i;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_configure_tx_ring_vf(wx, wx->tx_ring[i]);
+}
+
+static void wx_configure_srrctl_vf(struct wx *wx, struct wx_ring *ring,
+ int index)
+{
+ u32 srrctl;
+
+ srrctl = rd32m(wx, WX_VXRXDCTL(index),
+ (u32)~(WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK));
+ srrctl |= WX_VXRXDCTL_DROP;
+ srrctl |= WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
+ srrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
+
+ wr32(wx, WX_VXRXDCTL(index), srrctl);
+}
+
+void wx_setup_psrtype_vf(struct wx *wx)
+{
+ /* PSRTYPE must be initialized */
+ u32 psrtype = WX_VXMRQC_PSR_L2HDR |
+ WX_VXMRQC_PSR_L3HDR |
+ WX_VXMRQC_PSR_L4HDR |
+ WX_VXMRQC_PSR_TUNHDR |
+ WX_VXMRQC_PSR_TUNMAC;
+
+ wr32m(wx, WX_VXMRQC, WX_VXMRQC_PSR_MASK, WX_VXMRQC_PSR(psrtype));
+}
+
+void wx_setup_vfmrqc_vf(struct wx *wx)
+{
+ u16 rss_i = wx->num_rx_queues;
+ u32 vfmrqc = 0, vfreta = 0;
+ u8 i, j;
+
+ /* Fill out hash function seeds */
+ netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+ for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
+ wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);
+
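+	/* spread the table entries round-robin across the Rx queues, packing
+	 * four 8-bit entries into each 32-bit VXRETA register
+	 */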
+ for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ wx->rss_indir_tbl[i] = j;
+
+ vfreta |= j << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
+ wr32(wx, WX_VXRETA(i >> 2), vfreta);
+ vfreta = 0;
+ }
+ }
+
+ /* Perform hash on these packet types */
+ vfmrqc |= WX_VXMRQC_RSS_ALG_IPV4 |
+ WX_VXMRQC_RSS_ALG_IPV4_TCP |
+ WX_VXMRQC_RSS_ALG_IPV6 |
+ WX_VXMRQC_RSS_ALG_IPV6_TCP;
+
+ vfmrqc |= WX_VXMRQC_RSS_EN;
+
+ if (wx->num_rx_queues > 3)
+ vfmrqc |= WX_VXMRQC_RSS_HASH(2);
+ else if (wx->num_rx_queues > 1)
+ vfmrqc |= WX_VXMRQC_RSS_HASH(1);
+ wr32m(wx, WX_VXMRQC, WX_VXMRQC_RSS_MASK, WX_VXMRQC_RSS(vfmrqc));
+}
+
+void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(wx, WX_VXRXDCTL(reg_idx));
+ wx_disable_rx_queue(wx, ring);
+
+ wr32(wx, WX_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXRDBAH(reg_idx), rdba >> 32);
+
+ /* enable relaxed ordering */
+ pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
+ 0, PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_VXRDH(reg_idx), 0);
+ wr32(wx, WX_VXRDT(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_VXRDT(reg_idx);
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = WX_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->next_to_alloc = 0;
+
+ wx_configure_srrctl_vf(wx, ring, reg_idx);
+
+ /* allow any size packet since we can handle overflow */
+ rxdctl &= ~WX_VXRXDCTL_BUFLEN_MASK;
+ rxdctl |= WX_VXRXDCTL_BUFLEN(wx_buf_len(ring->count));
+ rxdctl |= WX_VXRXDCTL_ENABLE | WX_VXRXDCTL_VLAN;
+
+ /* enable RSC */
+ rxdctl &= ~WX_VXRXDCTL_RSCMAX_MASK;
+ rxdctl |= WX_VXRXDCTL_RSCMAX(0);
+ rxdctl |= WX_VXRXDCTL_RSCEN;
+
+ wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);
+
+ /* pf/vf reuse */
+ wx_enable_rx_queue(wx, ring);
+ wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
+}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
new file mode 100644
index 000000000000..43ea126b79eb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_LIB_H_
+#define _WX_VF_LIB_H_
+
+void wx_configure_msix_vf(struct wx *wx);
+int wx_write_uc_addr_list_vf(struct net_device *netdev);
+void wx_setup_psrtype_vf(struct wx *wx);
+void wx_setup_vfmrqc_vf(struct wx *wx);
+void wx_configure_tx_vf(struct wx *wx);
+void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring);
+
+#endif /* _WX_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index b5022c49dc5e..e0fc897b0a58 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -161,7 +161,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL);
else
- wx_intr_enable(wx, NGBE_INTR_MISC);
+ wx_intr_enable(wx, NGBE_INTR_MISC(wx));
}
/**
@@ -286,7 +286,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
* for queue. But when num_vfs == 7, vector[1] is assigned to vf6.
* Misc and queue should reuse interrupt vector[0].
*/
- if (wx->num_vfs == 7)
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
err = request_irq(wx->msix_entry->vector,
ngbe_misc_and_queue, 0, netdev->name, wx);
else
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index bb74263f0498..3b2ca7f47e33 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -87,7 +87,7 @@
#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
#define NGBE_INTR_ALL 0x1FF
-#define NGBE_INTR_MISC BIT(0)
+#define NGBE_INTR_MISC(A) BIT((A)->msix_entry->entry)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
diff --git a/drivers/net/ethernet/wangxun/ngbevf/Makefile b/drivers/net/ethernet/wangxun/ngbevf/Makefile
new file mode 100644
index 000000000000..11a4f15e2ce3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbevf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 1GbE virtual functions driver
+#
+
+obj-$(CONFIG_NGBEVF) += ngbevf.o
+
+ngbevf-objs := ngbevf_main.o
diff --git a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
new file mode 100644
index 000000000000..c1246ab5239c
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_vf.h"
+#include "../libwx/wx_vf_common.h"
+#include "ngbevf_type.h"
+
+/* ngbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ngbevf_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL_W), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860NCSI), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A1), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL1), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static const struct net_device_ops ngbevf_netdev_ops = {
+ .ndo_open = wxvf_open,
+ .ndo_stop = wxvf_close,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = wx_set_mac_vf,
+};
+
+static void ngbevf_set_num_queues(struct wx *wx)
+{
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+}
+
+static int ngbevf_sw_init(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ /* Initialize pcie info and common capability flags */
+ err = wx_sw_init(wx);
+ if (err < 0)
+ goto err_wx_sw_init;
+
+ /* Initialize the mailbox */
+ err = wx_init_mbx_params_vf(wx);
+ if (err)
+ goto err_init_mbx_params;
+
+ /* Initialize the device type */
+ wx->mac.type = wx_mac_em;
+ wx->mac.max_msix_vectors = NGBEVF_MAX_MSIX_VECTORS;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&wx->mbx.mbx_lock);
+
+ err = wx_reset_hw_vf(wx);
+ if (err) {
+ wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
+ goto err_reset_hw;
+ }
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_zero_ether_addr(wx->mac.addr))
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
+ eth_hw_addr_set(netdev, wx->mac.addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_info(&pdev->dev, "Assigning random MAC address\n");
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(wx->mac.addr, netdev->dev_addr);
+ ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
+ }
+
+ wx->mac.max_tx_queues = NGBEVF_MAX_TX_QUEUES;
+ wx->mac.max_rx_queues = NGBEVF_MAX_RX_QUEUES;
+ /* Enable dynamic interrupt throttling rates */
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ /* set default ring sizes */
+ wx->tx_ring_count = NGBEVF_DEFAULT_TXD;
+ wx->rx_ring_count = NGBEVF_DEFAULT_RXD;
+ /* set default work limits */
+ wx->tx_work_limit = NGBEVF_DEFAULT_TX_WORK;
+ wx->rx_work_limit = NGBEVF_DEFAULT_RX_WORK;
+ wx->set_num_queues = ngbevf_set_num_queues;
+
+ return 0;
+err_reset_hw:
+ kfree(wx->vfinfo);
+err_init_mbx_params:
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_wx_sw_init:
+ return err;
+}
+
+/**
+ * ngbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ngbevf_pci_tbl
+ *
+ * Return: 0 on success, negative on failure
+ *
+ * ngbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int ngbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct net_device *netdev;
+ struct wx *wx = NULL;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ dev_driver_string(&pdev->dev));
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct wx),
+ NGBEVF_MAX_TX_QUEUES,
+ NGBEVF_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+
+ wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->netdev_ops = &ngbevf_netdev_ops;
+
+ /* setup the private structure */
+ err = ngbevf_sw_init(wx);
+ if (err)
+ goto err_pci_release_regions;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+
+ wxvf_init_service(wx);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_sw_init;
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ pci_set_drvdata(pdev, wx);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+
+err_register:
+ wx_clear_interrupt_scheme(wx);
+err_free_sw_init:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_pci_release_regions:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ngbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ngbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void ngbevf_remove(struct pci_dev *pdev)
+{
+ wxvf_remove(pdev);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ngbevf_pm_ops, wxvf_suspend, wxvf_resume);
+
+static struct pci_driver ngbevf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ngbevf_pci_tbl,
+ .probe = ngbevf_probe,
+ .remove = ngbevf_remove,
+ .shutdown = wxvf_shutdown,
+ /* Power Management Hooks */
+ .driver.pm = pm_sleep_ptr(&ngbevf_pm_ops)
+};
+
+module_pci_driver(ngbevf_driver);
+
+MODULE_DEVICE_TABLE(pci, ngbevf_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h
new file mode 100644
index 000000000000..67e761089e99
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBEVF_TYPE_H_
+#define _NGBEVF_TYPE_H_
+
+/* Device IDs */
+#define NGBEVF_DEV_ID_EM_WX1860AL_W 0x0110
+#define NGBEVF_DEV_ID_EM_WX1860A2 0x0111
+#define NGBEVF_DEV_ID_EM_WX1860A2S 0x0112
+#define NGBEVF_DEV_ID_EM_WX1860A4 0x0113
+#define NGBEVF_DEV_ID_EM_WX1860A4S 0x0114
+#define NGBEVF_DEV_ID_EM_WX1860AL2 0x0115
+#define NGBEVF_DEV_ID_EM_WX1860AL2S 0x0116
+#define NGBEVF_DEV_ID_EM_WX1860AL4 0x0117
+#define NGBEVF_DEV_ID_EM_WX1860AL4S 0x0118
+#define NGBEVF_DEV_ID_EM_WX1860NCSI 0x0119
+#define NGBEVF_DEV_ID_EM_WX1860A1 0x011a
+#define NGBEVF_DEV_ID_EM_WX1860AL1 0x011b
+
+#define NGBEVF_MAX_MSIX_VECTORS 1
+#define NGBEVF_MAX_RX_QUEUES 1
+#define NGBEVF_MAX_TX_QUEUES 1
+#define NGBEVF_DEFAULT_TXD 128
+#define NGBEVF_DEFAULT_RXD 128
+#define NGBEVF_DEFAULT_TX_WORK 256
+#define NGBEVF_DEFAULT_RX_WORK 256
+
+#endif /* _NGBEVF_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
index 7dbcf41750c1..dc87ccad9652 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -294,6 +294,7 @@ static void txgbe_mac_link_up_aml(struct phylink_config *config,
wx_fc_enable(wx, tx_pause, rx_pause);
txgbe_reconfig_mac(wx);
+ txgbe_enable_sec_tx_path(wx);
txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 20b9a28bcb55..3885283681ec 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -31,7 +31,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
wr32(wx, WX_PX_MISC_IEN, misc_ien);
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
@@ -78,7 +78,6 @@ free_queue_irqs:
free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
- wx_reset_interrupt_capability(wx);
return err;
}
@@ -132,7 +131,7 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
txgbe->eicr = eicr;
if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
wx_msg_task(txgbe->wx);
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
}
return IRQ_WAKE_THREAD;
}
@@ -184,7 +183,7 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
nhandled++;
}
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
@@ -211,6 +210,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
+ txgbe->wx->misc_irq_domain = false;
}
int txgbe_setup_misc_irq(struct txgbe *txgbe)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index f3d2778b8e35..a5867f3c93fc 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -458,10 +458,14 @@ static int txgbe_open(struct net_device *netdev)
wx_configure(wx);
- err = txgbe_request_queue_irqs(wx);
+ err = txgbe_setup_misc_irq(wx->priv);
if (err)
goto err_free_resources;
+ err = txgbe_request_queue_irqs(wx);
+ if (err)
+ goto err_free_misc_irq;
+
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
if (err)
@@ -479,6 +483,9 @@ static int txgbe_open(struct net_device *netdev)
err_free_irq:
wx_free_irq(wx);
+err_free_misc_irq:
+ txgbe_free_misc_irq(wx->priv);
+ wx_reset_interrupt_capability(wx);
err_free_resources:
wx_free_resources(wx);
err_reset:
@@ -519,6 +526,7 @@ static int txgbe_close(struct net_device *netdev)
wx_ptp_stop(wx);
txgbe_down(wx);
wx_free_irq(wx);
+ txgbe_free_misc_irq(wx->priv);
wx_free_resources(wx);
txgbe_fdir_filter_exit(wx);
wx_control_hw(wx, false);
@@ -564,7 +572,6 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
- struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -575,7 +582,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
- txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -584,7 +590,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
- txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -882,13 +887,9 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe_init_fdir(txgbe);
- err = txgbe_setup_misc_irq(txgbe);
- if (err)
- goto err_release_hw;
-
err = txgbe_init_phy(txgbe);
if (err)
- goto err_free_misc_irq;
+ goto err_release_hw;
err = register_netdev(netdev);
if (err)
@@ -916,8 +917,6 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
-err_free_misc_irq:
- txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
@@ -957,7 +956,6 @@ static void txgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
- txgbe_free_misc_irq(txgbe);
wx_free_isb_resources(wx);
pci_release_selected_regions(pdev,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 42ec815159e8..41915d7dd372 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -302,8 +302,8 @@ struct txgbe_fdir_filter {
#define TXGBE_DEFAULT_RX_WORK 128
#endif
-#define TXGBE_INTR_MISC BIT(0)
-#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
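+/* queue vectors occupy bits [num_q_vectors - 1:0]; the misc vector uses the
+ * next bit up, so QALL is the mask of all queue vector bits below it
+ */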
#define TXGBE_MAX_EITR GENMASK(11, 3)
diff --git a/drivers/net/ethernet/wangxun/txgbevf/Makefile b/drivers/net/ethernet/wangxun/txgbevf/Makefile
new file mode 100644
index 000000000000..4c7e6de04424
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbevf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 10/25/40GbE virtual functions driver
+#
+
+obj-$(CONFIG_TXGBEVF) += txgbevf.o
+
+txgbevf-objs := txgbevf_main.o
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
new file mode 100644
index 000000000000..ebfce3cf753e
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_vf.h"
+#include "../libwx/wx_vf_common.h"
+#include "txgbevf_type.h"
+
+/* txgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbevf_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_SP1000), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML500F), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML510F), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5024), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5124), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML503F), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML513F), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static const struct net_device_ops txgbevf_netdev_ops = {
+ .ndo_open = wxvf_open,
+ .ndo_stop = wxvf_close,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = wx_set_mac_vf,
+};
+
+static void txgbevf_set_num_queues(struct wx *wx)
+{
+ u32 def_q = 0, num_tcs = 0;
+ u16 rss, queue;
+ int ret = 0;
+
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ /* fetch queue configuration from the PF */
+ ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+
+ if (ret)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1) {
+ wx->num_rx_queues = num_tcs;
+ } else {
+ rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
+ queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
+ rss = min_t(u16, queue, rss);
+
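+		/* multiple RSS queues are only used once mailbox API 1.3 or
+		 * later has been negotiated with the PF
+		 */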
+ if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
+ wx->num_rx_queues = rss;
+ wx->num_tx_queues = rss;
+ }
+ }
+}
+
+static void txgbevf_init_type_code(struct wx *wx)
+{
+ switch (wx->device_id) {
+ case TXGBEVF_DEV_ID_SP1000:
+ case TXGBEVF_DEV_ID_WX1820:
+ wx->mac.type = wx_mac_sp;
+ break;
+ case TXGBEVF_DEV_ID_AML500F:
+ case TXGBEVF_DEV_ID_AML510F:
+ case TXGBEVF_DEV_ID_AML5024:
+ case TXGBEVF_DEV_ID_AML5124:
+ case TXGBEVF_DEV_ID_AML503F:
+ case TXGBEVF_DEV_ID_AML513F:
+ wx->mac.type = wx_mac_aml;
+ break;
+ default:
+ wx->mac.type = wx_mac_unknown;
+ break;
+ }
+}
+
+static int txgbevf_sw_init(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ /* Initialize pcie info and common capability flags */
+ err = wx_sw_init(wx);
+ if (err < 0)
+ goto err_wx_sw_init;
+
+ /* Initialize the mailbox */
+ err = wx_init_mbx_params_vf(wx);
+ if (err)
+ goto err_init_mbx_params;
+
+ /* max q_vectors */
+ wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
+ /* Initialize the device type */
+ txgbevf_init_type_code(wx);
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&wx->mbx.mbx_lock);
+
+ err = wx_reset_hw_vf(wx);
+ if (err) {
+ wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
+ goto err_reset_hw;
+ }
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_zero_ether_addr(wx->mac.addr))
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
+ eth_hw_addr_set(netdev, wx->mac.addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_info(&pdev->dev, "Assigning random MAC address\n");
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(wx->mac.addr, netdev->dev_addr);
+ ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
+ }
+
+ wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
+ wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
+ /* Enable dynamic interrupt throttling rates */
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ /* set default ring sizes */
+ wx->tx_ring_count = TXGBEVF_DEFAULT_TXD;
+ wx->rx_ring_count = TXGBEVF_DEFAULT_RXD;
+ /* set default work limits */
+ wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
+ wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;
+
+ wx->set_num_queues = txgbevf_set_num_queues;
+
+ return 0;
+err_reset_hw:
+ kfree(wx->vfinfo);
+err_init_mbx_params:
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_wx_sw_init:
+ return err;
+}
+
+/**
+ * txgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbevf_pci_tbl
+ *
+ * Return: 0 on success, negative on failure
+ *
+ * txgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct net_device *netdev;
+ struct wx *wx = NULL;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ dev_driver_string(&pdev->dev));
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct wx),
+ TXGBEVF_MAX_TX_QUEUES,
+ TXGBEVF_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+
+ wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ wx->b4_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 4),
+ pci_resource_len(pdev, 4));
+ if (!wx->b4_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->netdev_ops = &txgbevf_netdev_ops;
+
+ /* setup the private structure */
+ err = txgbevf_sw_init(wx);
+ if (err)
+ goto err_pci_release_regions;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+
+ wxvf_init_service(wx);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_sw_init;
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ pci_set_drvdata(pdev, wx);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+
+err_register:
+ wx_clear_interrupt_scheme(wx);
+err_free_sw_init:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_pci_release_regions:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbevf_remove(struct pci_dev *pdev)
+{
+ wxvf_remove(pdev);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, wxvf_suspend, wxvf_resume);
+
+static struct pci_driver txgbevf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = txgbevf_pci_tbl,
+ .probe = txgbevf_probe,
+ .remove = txgbevf_remove,
+ .shutdown = wxvf_shutdown,
+ /* Power Management Hooks */
+ .driver.pm = pm_sleep_ptr(&txgbevf_pm_ops)
+};
+
+module_pci_driver(txgbevf_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbevf_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h
new file mode 100644
index 000000000000..1364d2b58bb0
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBEVF_TYPE_H_
+#define _TXGBEVF_TYPE_H_
+
+/* Device IDs */
+#define TXGBEVF_DEV_ID_SP1000 0x1000
+#define TXGBEVF_DEV_ID_WX1820 0x2000
+#define TXGBEVF_DEV_ID_AML500F 0x500F
+#define TXGBEVF_DEV_ID_AML510F 0x510F
+#define TXGBEVF_DEV_ID_AML5024 0x5024
+#define TXGBEVF_DEV_ID_AML5124 0x5124
+#define TXGBEVF_DEV_ID_AML503F 0x503f
+#define TXGBEVF_DEV_ID_AML513F 0x513f
+
+#define TXGBEVF_MAX_MSIX_VECTORS 2
+#define TXGBEVF_MAX_RSS_NUM 4
+#define TXGBEVF_MAX_RX_QUEUES 4
+#define TXGBEVF_MAX_TX_QUEUES 4
+#define TXGBEVF_DEFAULT_TXD 128
+#define TXGBEVF_DEFAULT_RXD 128
+#define TXGBEVF_DEFAULT_TX_WORK 256
+#define TXGBEVF_DEFAULT_RX_WORK 256
+
+#endif /* _TXGBEVF_TYPE_H_ */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index edb36ff07a0c..711ed9c2631b 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev,
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1595,7 +1595,7 @@ static int temac_probe(struct platform_device *pdev)
if (temac_np) {
lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
if (lp->phy_node)
- dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+ dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
} else if (pdata) {
snprintf(lp->phy_name, sizeof(lp->phy_name),
PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index ecf47107146d..4719d40a63ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -286,7 +286,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
/* Read the remaining data */
for (; length > 0; length--)
- *to_u8_ptr = *from_u8_ptr;
+ *to_u8_ptr++ = *from_u8_ptr++;
}
}