Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 18
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 9
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 34
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h | 38
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 110
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 48
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 430
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 438
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 6
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 10
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 9
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 17
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 23
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 10
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 92
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 75
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 10
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 91
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 30
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 403
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 96
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 120
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 92
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 143
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 48
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 703
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 86
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 42
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 163
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.c | 144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.h | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 74
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 233
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 628
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 143
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 402
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h | 44
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 480
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 56
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 298
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 463
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 103
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c | 122
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 315
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c | 165
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c | 79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 218
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1140
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 517
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 203
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 165
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 97
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 187
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 24
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 9
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 6
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 10
-rw-r--r--  drivers/net/ethernet/mscc/Makefile | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 63
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_fdma.c | 894
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_fdma.h | 166
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 44
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 530
-rw-r--r--  drivers/net/ethernet/mscc/vsc7514_regs.c | 523
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 6
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 8
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 102
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 72
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 42
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 91
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 10
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 66
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 4
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 4
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 12
-rw-r--r--  drivers/net/ethernet/vertexcom/Kconfig | 25
-rw-r--r--  drivers/net/ethernet/vertexcom/Makefile | 6
-rw-r--r--  drivers/net/ethernet/vertexcom/mse102x.c | 769
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 3
242 files changed, 9656 insertions, 5984 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 027cbacca1c9..db3ec4768159 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -183,6 +183,7 @@ source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/vertexcom/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 33d30b619e00..8a87c1083d1d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 800ee022388f..cccf8a3ead5e 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -76,7 +76,6 @@ struct emac_board_info {
void __iomem *membase;
u32 msg_enable;
struct net_device *ndev;
- struct sk_buff *skb_last;
u16 tx_fifo_stat;
int emacrx_completed_flag;
@@ -499,7 +498,6 @@ static void emac_rx(struct net_device *dev)
struct sk_buff *skb;
u8 *rdptr;
bool good_packet;
- static int rxlen_last;
unsigned int reg_val;
u32 rxhdr, rxstatus, rxcount, rxlen;
@@ -514,22 +512,6 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RXCount: %x\n", rxcount);
- if ((db->skb_last != NULL) && (rxlen_last > 0)) {
- dev->stats.rx_bytes += rxlen_last;
-
- /* Pass to upper layer */
- db->skb_last->protocol = eth_type_trans(db->skb_last,
- dev);
- netif_rx(db->skb_last);
- dev->stats.rx_packets++;
- db->skb_last = NULL;
- rxlen_last = 0;
-
- reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val &= ~EMAC_RX_CTL_DMA_EN;
- writel(reg_val, db->membase + EMAC_RX_CTL_REG);
- }
-
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index d75d95a97dd9..993b2fb42961 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rxdescmem_busaddr = dma_res->start;
} else {
+ ret = -ENODEV;
goto err_free_netdev;
}
- if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
dma_set_coherent_mask(priv->device,
DMA_BIT_MASK(priv->dmaops->dmamask));
- else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+ } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
- else
+ } else {
+ ret = -EIO;
goto err_free_netdev;
+ }
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
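[Note: the hunk above makes each failure path set a real error code before jumping to err_free_netdev, and keeps the usual two-step DMA-mask fallback: try the width the DMA engine claims, fall back to 32 bits, abort the probe if neither sticks. A minimal sketch of that pattern, using the combined dma_set_mask_and_coherent() helper and hypothetical mydev_* naming (not this driver's code):

#include <linux/dma-mapping.h>

/* Try the engine's native addressing width first, then fall back to
 * 32-bit; probing must not continue with no usable DMA mask at all. */
static int mydev_set_dma_masks(struct device *dev, unsigned int native_bits)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(native_bits)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
]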
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 30d24d19f40d..492ac383f16d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1508,9 +1508,6 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
mac_tscr = 0;
switch (config.tx_type) {
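[Note: this deletion recurs throughout the series (aq_main, bnx2x, bnxt, tg3, macb, liquidio, octeon_mgmt, nicvf, tsnep, fec and gianfar below all drop the same check). hwtstamp_config.flags is now validated centrally in the kernel's ioctl path and has gained defined bits (such as HWTSTAMP_FLAG_BONDED_PHC_INDEX), so a blanket "if (config.flags) return -EINVAL" in a driver would wrongly reject valid requests. The resulting driver-side shape, sketched with hypothetical mydev naming:

#include <linux/net_tstamp.h>
#include <linux/uaccess.h>

/* The core rejects unknown config.flags bits before calling the
 * driver, so the handler only interprets tx_type and rx_filter. */
static int mydev_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* ... program the hardware and cache the config here ... */

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
]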
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 23b2d390fcdd..ace691d7cd75 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -40,10 +40,12 @@
#define AQ_DEVICE_ID_AQC113DEV 0x00C0
#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC113CA 0x34C0
#define AQ_DEVICE_ID_AQC114CS 0x93C0
#define AQ_DEVICE_ID_AQC113 0x04C0
#define AQ_DEVICE_ID_AQC113C 0x14C0
#define AQ_DEVICE_ID_AQC115C 0x12C0
+#define AQ_DEVICE_ID_AQC116C 0x11C0
#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
@@ -53,20 +55,19 @@
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2G5 BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-#define AQ_NIC_RATE_10M BIT(6)
-#define AQ_NIC_RATE_1G_HALF BIT(7)
-#define AQ_NIC_RATE_100M_HALF BIT(8)
-#define AQ_NIC_RATE_10M_HALF BIT(9)
+#define AQ_NIC_RATE_2G5 BIT(2)
+#define AQ_NIC_RATE_1G BIT(3)
+#define AQ_NIC_RATE_100M BIT(4)
+#define AQ_NIC_RATE_10M BIT(5)
+#define AQ_NIC_RATE_1G_HALF BIT(6)
+#define AQ_NIC_RATE_100M_HALF BIT(7)
+#define AQ_NIC_RATE_10M_HALF BIT(8)
-#define AQ_NIC_RATE_EEE_10G BIT(10)
-#define AQ_NIC_RATE_EEE_5G BIT(11)
-#define AQ_NIC_RATE_EEE_2G5 BIT(12)
-#define AQ_NIC_RATE_EEE_1G BIT(13)
-#define AQ_NIC_RATE_EEE_100M BIT(14)
+#define AQ_NIC_RATE_EEE_10G BIT(9)
+#define AQ_NIC_RATE_EEE_5G BIT(10)
+#define AQ_NIC_RATE_EEE_2G5 BIT(11)
+#define AQ_NIC_RATE_EEE_1G BIT(12)
+#define AQ_NIC_RATE_EEE_100M BIT(13)
#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\
AQ_NIC_RATE_EEE_5G |\
AQ_NIC_RATE_EEE_2G5 |\
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 062a300a566a..dbd284660135 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
};
struct aq_stats_s {
+ u64 brc;
+ u64 btc;
u64 uprc;
u64 mprc;
u64 bprc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e22935ce9573..e65ce7199dac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -231,9 +231,6 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
struct hwtstamp_config *config)
{
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1acf544afeb4..33f1a1377588 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
aq_macsec_init(self);
#endif
- mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
- mutex_unlock(&self->fwreq_mutex);
- if (err)
- goto err_exit;
+ if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+ // If DT has none or an invalid one, ask device for MAC address
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+ mutex_unlock(&self->fwreq_mutex);
- eth_hw_addr_set(self->ndev, addr);
+ if (err)
+ goto err_exit;
- if (!is_valid_ether_addr(self->ndev->dev_addr) ||
- !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
- netdev_warn(self->ndev, "MAC is invalid, will use random.");
- eth_hw_addr_random(self->ndev);
+ if (is_valid_ether_addr(addr) &&
+ aq_nic_is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(self->ndev, addr);
+ } else {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
}
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data[++i] = stats->mbtc;
data[++i] = stats->bbrc;
data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ if (stats->brc)
+ data[++i] = stats->brc;
+ else
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ if (stats->btc)
+ data[++i] = stats->btc;
+ else
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
data[++i] = stats->dma_pkt_rc;
data[++i] = stats->dma_pkt_tc;
data[++i] = stats->dma_oct_rc;
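[Note: two behavioural changes in this file. First, the permanent MAC is now only queried from firmware when the platform (device tree/ACPI) does not already supply a valid one; second, the ethtool byte totals prefer the firmware-provided brc/btc aggregates when they are populated. The address-priority chain, sketched with a hypothetical helper and the firmware-query error handling elided:

#include <linux/etherdevice.h>

/* Address priority: device tree/ACPI -> adapter firmware -> random. */
static void mydev_init_mac(struct device *dev, struct net_device *ndev,
			   const u8 *fw_addr)
{
	if (!platform_get_ethdev_address(dev, ndev))
		return;		/* the platform supplied a valid MAC */
	if (is_valid_ether_addr(fw_addr))
		eth_hw_addr_set(ndev, fw_addr);
	else
		eth_hw_addr_random(ndev);
}
]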
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index d4b1976ee69b..797a95142d1f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
{}
};
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
- { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+ { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d281322d7dd2..f4774cf051c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
{
unsigned int count;
- WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
- "Invalid tc %u (#rx=%u, #tx=%u)\n",
- tc, self->rx_rings, self->tx_rings);
if (!aq_vec_is_valid_tc(self, tc))
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 3f1704cbe1cb..7e88d7234b14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
struct hw_atl_utils_mbox mbox;
+ bool corrupted_stats = false;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
- mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+ curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
@@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(bbrc);
AQ_SDELTA(bbtc);
AQ_SDELTA(dpc);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
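[Note: the rewritten AQ_SDELTA treats any negative delta as evidence of a corrupted firmware snapshot and discards the whole sample rather than folding garbage into the cumulative counters; the working copy is committed back to curr_stats only after every delta has been validated. The same guard, sketched generically over arrays of counters:

#include <linux/types.h>

/* Accumulate (now - last) into cur only if every delta in the sample
 * is non-negative; one negative delta marks the snapshot corrupt and
 * the cumulative counters are left untouched. */
static bool stats_accumulate(u64 *cur, const u64 *now, const u64 *last,
			     int n)
{
	int i;

	for (i = 0; i < n; i++)
		if ((s64)(now[i] - last[i]) < 0)
			return false;	/* corrupted snapshot: drop it */

	for (i = 0; i < n; i++)
		cur[i] += now[i] - last[i];
	return true;
}
]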
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index eac631c45c56..4d4cfbc91e19 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5G)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_5GSR)
- rate |= FW2X_RATE_5G;
-
if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index c98708bb044c..5dfc751572ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
- AQ_NIC_RATE_1G_HALF |
AQ_NIC_RATE_100M |
- AQ_NIC_RATE_100M_HALF |
- AQ_NIC_RATE_10M |
- AQ_NIC_RATE_10M_HALF,
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
};
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
index de8723f1c28a..346f0dc9912e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -9,6 +9,8 @@
#include "aq_common.h"
extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
extern const struct aq_hw_ops hw_atl2_ops;
#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
index b66fa346581c..6bad64c77b87 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -239,7 +239,8 @@ struct version_s {
u8 minor;
u16 build;
} phy;
- u32 rsvd;
+ u32 drv_iface_ver:4;
+ u32 rsvd:28;
};
struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
u16 rsvd2;
};
-struct statistics_s {
+struct statistics_a0_s {
struct {
u32 link_up;
u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
u32 reserve_fw_gap;
};
+struct __packed statistics_b0_s {
+ u64 rx_good_octets;
+ u64 rx_pause_frames;
+ u64 rx_good_frames;
+ u64 rx_errors;
+ u64 rx_unicast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_broadcast_frames;
+
+ u64 tx_good_octets;
+ u64 tx_pause_frames;
+ u64 tx_good_frames;
+ u64 tx_errors;
+ u64 tx_unicast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_broadcast_frames;
+
+ u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+ union __packed {
+ struct statistics_a0_s a0;
+ struct statistics_b0_s b0;
+ };
+};
+
struct filter_caps_s {
u8 l2_filters_base_index:6;
u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
u32 rsvd5;
};
-struct fw_interface_out {
+struct __packed fw_interface_out {
struct transaction_counter_s transaction_id;
struct version_s version;
struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
struct core_dump_s core_dump;
u32 rsvd11;
struct statistics_s stats;
- u32 rsvd12;
struct filter_caps_s filter_caps;
struct device_caps_s device_caps;
u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
#define AQ_HOST_MODE_LOW_POWER 3U
#define AQ_HOST_MODE_SHUTDOWN 4U
+#define AQ_A2_FW_INTERFACE_A0 0
+#define AQ_A2_FW_INTERFACE_B0 1
+
int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
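[Note: the statistics block becomes a packed union so a single shared-buffer read serves both firmware interface revisions, with drv_iface_ver (carved out of a previously reserved word in version_s) selecting which view is valid; dropping rsvd12 keeps the fields after stats at their old offsets. Shared-memory ABIs like this are typically pinned with build-time size checks, for example (size derived from the b0 definition above):

#include <linux/build_bug.h>

/* 14 packed u64 counters plus one u32 loop counter: 116 bytes. */
static_assert(sizeof(struct statistics_b0_s) ==
	      14 * sizeof(u64) + sizeof(u32));
]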
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index dd259c8f2f4f..58d426dda3ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
if (cnt > AQ_A2_FW_READ_TRY_MAX)
return -ETIME;
if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
- udelay(1);
+ mdelay(1);
} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
{
link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
- link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_N5G = link_options->rate_5G;
link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
link_options->rate_N2P5G = link_options->rate_2P5G;
link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
rate |= AQ_NIC_RATE_10G;
if (lkp_link_caps->rate_5G)
rate |= AQ_NIC_RATE_5G;
- if (lkp_link_caps->rate_N5G)
- rate |= AQ_NIC_RATE_5GSR;
if (lkp_link_caps->rate_2P5G)
rate |= AQ_NIC_RATE_2G5;
if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
return 0;
}
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
- struct statistics_s stats;
-
- hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
- stats.msm._F_ - priv->last_stats.msm._F_)
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+ curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
AQ_SDELTA(mbtc, tx_multicast_octets);
AQ_SDELTA(bbrc, rx_broadcast_octets);
AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
- self->curr_stats.dma_pkt_rc =
- hw_atl_stats_rx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_pkt_tc =
- hw_atl_stats_tx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_oct_rc =
- hw_atl_stats_rx_dma_good_octet_counter_get(self);
- self->curr_stats.dma_oct_tc =
- hw_atl_stats_tx_dma_good_octet_counter_get(self);
- self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+ curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+ else \
+ corrupted_stats = true; \
+} while (0)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_errors);
+ AQ_SDELTA(brc, rx_good_octets);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+ AQ_SDELTA(btc, tx_good_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct statistics_s stats;
+ struct version_s version;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+ if (err)
+ return err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+ if (err)
+ return err;
+
+ if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+ aq_a2_fill_a0_stats(self, &stats);
+ else
+ aq_a2_fill_b0_stats(self, &stats);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
memcpy(&priv->last_stats, &stats, sizeof(stats));
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
hw_atl2_shared_buffer_read_safe(self, version, &version);
/* A2 FW version is stored in reverse order */
- return version.mac.major << 24 |
- version.mac.minor << 16 |
- version.mac.build;
+ return version.bundle.major << 24 |
+ version.bundle.minor << 16 |
+ version.bundle.build;
}
int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
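[Note: the stats path now reads the negotiated interface revision out of the mailbox first and dispatches to the matching A0/B0 decoder, and the firmware version reported to userspace switches from the MAC component to the bundle version. Since A2 firmware stores version fields most-significant-first, the driver packs them back into the conventional u32, roughly (illustrative helper, not driver code):

/* Pack an A2 firmware version into major.minor.build order for
 * ethtool -i and friends. */
static u32 a2_fw_ver_to_u32(u8 major, u8 minor, u16 build)
{
	return (u32)major << 24 | (u32)minor << 16 | build;
}
]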
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index ff924f06581e..270c2935591b 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1024,17 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
-static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- state->link = 0;
-}
-
-static void ag71xx_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
static void ag71xx_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -1098,8 +1087,6 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
.validate = phylink_generic_validate,
- .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
- .mac_an_restart = ag71xx_mac_an_restart,
.mac_config = ag71xx_mac_config,
.mac_link_down = ag71xx_mac_link_down,
.mac_link_up = ag71xx_mac_link_up,
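[Note: phylink now only invokes mac_pcs_get_state() and mac_an_restart() for drivers flagged as legacy, so empty stubs like the ones deleted above can simply go. The remaining ops table is the minimal modern shape, roughly (hypothetical mydev names):

/* Minimal non-legacy phylink MAC ops: no PCS callbacks are needed
 * when there is no in-band AN state to report. */
static const struct phylink_mac_ops mydev_phylink_mac_ops = {
	.validate	= phylink_generic_validate,
	.mac_config	= mydev_mac_config,
	.mac_link_down	= mydev_mac_link_down,
	.mac_link_up	= mydev_mac_link_up,
};
]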
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 7cc5213c575a..b07cb9bc5f2d 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
enet->irq_tx = platform_get_irq_byname(pdev, "tx");
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
err = bcm4908_enet_dma_alloc(enet);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index aec666e97683..651bc1d7a57a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15356,11 +15356,6 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- BNX2X_ERR("config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 8388be119f9a..48520967746f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -417,9 +417,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 283f3c1f1195..c28f8cc00d1c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -13806,9 +13806,6 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 4b4ad731897f..8aca768571b2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -235,13 +235,18 @@ static int
bnad_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
@@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
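[Note: moving off the legacy SUPPORTED_*/ADVERTISED_* u32 masks matters because those masks top out at 31 modes and have no bits for 10GBASE-CR/SR/LR; the ksettings helpers index the full ETHTOOL_LINK_MODE_* bitmap directly. A condensed sketch of the same conversion (hypothetical mydev naming):

#include <linux/bitmap.h>
#include <linux/ethtool.h>

static void mydev_fill_link_modes(struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported,
					     10000baseSR_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	/* advertise exactly what is supported */
	bitmap_copy(cmd->link_modes.advertising, cmd->link_modes.supported,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);
}
]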
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 095c5a2144a7..fb6b27f46b15 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -464,10 +464,6 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(*tstamp_config)))
return -EFAULT;
- /* reserved for future extensions */
- if (tstamp_config->flags)
- return -EINVAL;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 12eee2bc7f5c..ba28aa444e5a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2114,9 +2114,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c607756b731f..568f211d91cc 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1254,9 +1254,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4b4ffdd1044d..103591dcea1c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -702,9 +702,6 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) /* reserved for future extensions */
- return -EINVAL;
-
/* Check the status of hardware for tiemstamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bb45d5df2856..63191692f624 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1917,10 +1917,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
index 4bfb4d8508f5..eaad453d487e 100644
--- a/drivers/net/ethernet/engleder/tsnep_ptp.c
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -31,9 +31,6 @@ int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 6451c8383639..8e643567abce 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_portal_free(priv->mc_io);
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 7b4961daa254..ed7301b69169 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -377,6 +377,9 @@ struct bufdesc_ex {
#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
+ (((X) == 1) ? FEC_ENET_RXF_1 : \
+ FEC_ENET_RXF_2))
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
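[Note: FEC_ENET_RXF_GET() maps a queue index to that queue's RXF event bit; fec_enet_rx_queue() (below) uses it so polling one queue no longer acknowledges, and thereby loses, the other queues' pending receive events. The same mapping written as a lookup table, for illustration only:

/* Equivalent of FEC_ENET_RXF_GET(X), as a table. */
static const u32 fec_rxf_bit[3] = {
	FEC_ENET_RXF_0, FEC_ENET_RXF_1, FEC_ENET_RXF_2,
};
]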
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f38e70692514..796133de527e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1185,6 +1185,21 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
}
}
+static void fec_irqs_disable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+}
+
+static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+}
+
static void
fec_stop(struct net_device *ndev)
{
@@ -1211,15 +1226,13 @@ fec_stop(struct net_device *ndev)
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
}
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
} else {
- writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
- fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
@@ -1480,7 +1493,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;
- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
@@ -2877,15 +2890,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
- if (device_may_wakeup(&ndev->dev)) {
+ if (device_may_wakeup(&ndev->dev))
fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
- if (fep->wake_irq > 0)
- enable_irq_wake(fep->wake_irq);
- } else {
+ else
fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
- if (fep->wake_irq > 0)
- disable_irq_wake(fep->wake_irq);
- }
return 0;
}
@@ -4057,9 +4065,19 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
- fec_enet_clk_enable(ndev, false);
- if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ fec_irqs_disable(ndev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ } else {
+ fec_irqs_disable_except_wakeup(ndev);
+ if (fep->wake_irq > 0) {
+ disable_irq(fep->wake_irq);
+ enable_irq_wake(fep->wake_irq);
+ }
+ fec_enet_stop_mode(fep, true);
+ }
+ /* It's safe to disable clocks since interrupts are masked */
+ fec_enet_clk_enable(ndev, false);
}
rtnl_unlock();
@@ -4097,6 +4115,10 @@ static int __maybe_unused fec_resume(struct device *dev)
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
fec_enet_stop_mode(fep, false);
+ if (fep->wake_irq) {
+ disable_irq_wake(fep->wake_irq);
+ enable_irq(fep->wake_irq);
+ }
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
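[Note: the reworked suspend path masks everything except the wakeup interrupt, parks normal servicing of the wake line with disable_irq() while arming it as a wakeup source with enable_irq_wake(), and only then gates the clocks; resume reverses both calls in the opposite order. The paired helpers, sketched with hypothetical names:

#include <linux/interrupt.h>

/* Suspend: stop servicing the line but keep it armed as a wake
 * source. Resume: disarm it and restore normal servicing. */
static void mydev_wake_irq_suspend(int wake_irq)
{
	disable_irq(wake_irq);
	enable_irq_wake(wake_irq);
}

static void mydev_wake_irq_resume(int wake_irq)
{
	disable_irq_wake(wake_irq);
	enable_irq(wake_irq);
}
]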
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index d71eac7e1924..af99017a5453 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -473,10 +473,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index acab58fd3db3..206b7a35eaf5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2076,10 +2076,6 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 88ca49cbc1e2..d57508bc4307 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
} else {
skb = napi_alloc_skb(napi, len);
+
+ if (unlikely(!skb))
+ return NULL;
set_protocol = true;
}
__skb_put(skb, len);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 081295bff765..817e2e8a7287 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1083,7 +1083,7 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", index);
sprintf(result[j++], "%u",
READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%u",
+ sprintf(result[j++], "%d",
atomic_read(&ring->page_pool->pages_state_release_cnt));
sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
sprintf(result[j++], "%u", ring->page_pool->p.order);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index bd8801065e02..83aa1450ab9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -4,6 +4,8 @@
#ifndef __HNS3_DEBUGFS_H
#define __HNS3_DEBUGFS_H
+#include "hnae3.h"
+
#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c7707263f9d..babc5d7a3b52 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1002,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
return false;
if (ALIGN(len, dma_get_cache_alignment()) > space) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
@@ -1021,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
return false;
if (space < HNS3_MAX_SGL_SIZE) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
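[Note: the many two-line stat bumps in this file collapse into hns3_ring_stats_update(); its definition lands in hns3_enet.h (see the diffstat) and is outside this excerpt, but a definition consistent with these call sites would be:

/* Assumed shape of the helper; the real macro is added in
 * hns3_enet.h, not shown here. */
#define hns3_ring_stats_update(ring, cnt) do {			\
	typeof(ring) (tmp_ring) = (ring);			\
	u64_stats_update_begin(&(tmp_ring)->syncp);		\
	((tmp_ring)->stats.cnt)++;				\
	u64_stats_update_end(&(tmp_ring)->syncp);		\
} while (0)
]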
@@ -1548,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
return true;
}
-static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, struct hns3_desc *desc,
- struct hns3_desc_cb *desc_cb)
+struct hns3_desc_param {
+ u32 paylen_ol4cs;
+ u32 ol_type_vlan_len_msec;
+ u32 type_cs_vlan_tso;
+ u16 mss_hw_csum;
+ u16 inner_vtag;
+ u16 out_vtag;
+};
+
+static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
+{
+ pa->paylen_ol4cs = skb->len;
+ pa->ol_type_vlan_len_msec = 0;
+ pa->type_cs_vlan_tso = 0;
+ pa->mss_hw_csum = 0;
+ pa->inner_vtag = 0;
+ pa->out_vtag = 0;
+}
+
+static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_param *param)
{
- u32 ol_type_vlan_len_msec = 0;
- u32 paylen_ol4cs = skb->len;
- u32 type_cs_vlan_tso = 0;
- u16 mss_hw_csum = 0;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
int ret;
ret = hns3_handle_vtags(ring, skb);
if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_vlan_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_vlan_err);
return ret;
} else if (ret == HNS3_INNER_VLAN_TAG) {
- inner_vtag = skb_vlan_tag_get(skb);
- inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->inner_vtag = skb_vlan_tag_get(skb);
+ param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
} else if (ret == HNS3_OUTER_VLAN_TAG) {
- out_vtag = skb_vlan_tag_get(skb);
- out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->out_vtag = skb_vlan_tag_get(skb);
+ param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1);
}
+ return 0;
+}
- desc_cb->send_bytes = skb->len;
+static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ struct hns3_desc_param *param)
+{
+ u8 ol4_proto, il4_proto;
+ int ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- if (hns3_check_hw_tx_csum(skb)) {
- /* set checksum start and offset, defined in 2 Bytes */
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
- skb_checksum_start_offset(skb) >> 1);
- hns3_set_field(ol_type_vlan_len_msec,
- HNS3_TXD_CSUM_OFFSET_S,
- skb->csum_offset >> 1);
- mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
- goto out_hw_tx_csum;
- }
+ if (hns3_check_hw_tx_csum(skb)) {
+ /* set checksum start and offset, defined in 2 Bytes */
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+ skb_checksum_start_offset(skb) >> 1);
+ hns3_set_field(param->ol_type_vlan_len_msec,
+ HNS3_TXD_CSUM_OFFSET_S,
+ skb->csum_offset >> 1);
+ param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+ return 0;
+ }
- skb_reset_mac_len(skb);
+ skb_reset_mac_len(skb);
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l4_proto_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l4_proto_err);
+ return ret;
+ }
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l2l3l4_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &param->type_cs_vlan_tso,
+ &param->ol_type_vlan_len_msec);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l2l3l4_err);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
+ &param->type_cs_vlan_tso, &desc_cb->send_bytes);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_tso_err);
+ return ret;
+ }
+ return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc_param param;
+ int ret;
+
+ hns3_init_desc_data(skb, &param);
+ ret = hns3_handle_vlan_info(ring, skb, &param);
+ if (unlikely(ret < 0))
+ return ret;
+
+ desc_cb->send_bytes = skb->len;
- ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
- &type_cs_vlan_tso, &desc_cb->send_bytes);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_tso_err++;
- u64_stats_update_end(&ring->syncp);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
+ if (ret)
return ret;
- }
}
-out_hw_tx_csum:
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
- desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+ cpu_to_le32(param.ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
+ desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
+ desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
+ desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
return 0;
}
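The net effect of the refactor above: the helpers each fill a scratch struct hns3_desc_param, and the descriptor words are committed to the hardware descriptor in one place. A minimal model of that pattern, with made-up field names rather than the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct desc_param { uint32_t word0; uint16_t vlan_in; };

    static int fill_vlan(struct desc_param *p) { p->vlan_in = 100; return 0; }
    static int fill_csum(struct desc_param *p) { p->word0 |= 1u << 3; return 0; }

    static int fill_desc(uint32_t *hw_word0, uint16_t *hw_vlan)
    {
    	struct desc_param p = { 0 };

    	if (fill_vlan(&p) || fill_csum(&p))
    		return -1;
    	/* single commit point, like the "Set txbd" block above */
    	*hw_word0 = p.word0;
    	*hw_vlan = p.vlan_in;
    	return 0;
    }

    int main(void)
    {
    	uint32_t w;
    	uint16_t v;

    	if (!fill_desc(&w, &v))
    		printf("word0=%u vlan=%u\n", w, v);
    	return 0;
    }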
@@ -1713,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
}
if (unlikely(dma_mapping_error(dev, dma))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1861,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* recursion level of over HNS3_MAX_RECURSION_LEVEL.
*/
if (bd_num == UINT_MAX) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.over_max_recursion++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, over_max_recursion);
return -ENOMEM;
}
@@ -1872,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.hw_limitation++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, hw_limitation);
return -ENOMEM;
}
if (__skb_linearize(skb)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1911,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_copy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_copy);
}
out:
@@ -1933,9 +1949,7 @@ out:
return bd_num;
}
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_busy);
return -EBUSY;
}
@@ -2020,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->pending_buf += num;
if (!doorbell) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_more++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_more);
return;
}
@@ -2072,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
ret = skb_copy_bits(skb, 0, buf, size);
if (unlikely(ret < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.copy_bits_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, copy_bits_err);
return ret;
}
@@ -2097,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
dma_sync_single_for_device(ring_to_dev(ring), dma, size,
DMA_TO_DEVICE);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_bounce++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_bounce);
+
return bd_num;
}
@@ -2129,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
if (unlikely(nents < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.skb2sgl_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, skb2sgl_err);
return -ENOMEM;
}
@@ -2140,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
DMA_TO_DEVICE);
if (unlikely(!sgt->nents)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.map_sg_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, map_sg_err);
return -ENOMEM;
}
@@ -2154,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
for (i = 0; i < sgt->nents; i++)
bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
sg_dma_len(sgt->sgl + i));
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_sgl++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_sgl);
return bd_num;
}
@@ -2182,23 +2184,45 @@ out:
return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}
+static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ int next_to_use_head)
+{
+ int ret;
+
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+ desc_cb);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
+	/* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
+	 * zero (which is unlikely), and 'ret > 0' is the number of tx
+	 * descriptors that need to be notified to the hw.
+	 */
+ ret = hns3_handle_desc_filling(ring, skb);
+ if (likely(ret > 0))
+ return ret;
+
+fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+ return ret;
+}
+
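Since hns3_handle_skb_desc() now performs its own hns3_clear_desc() rollback, the xmit function below is left with a single out_err_tx_ok exit. The ownership idea in miniature — names here are hypothetical:

    #include <stdio.h>

    static int ring_entries;

    static int fill(void)      { ring_entries++; return -1; /* simulated failure */ }
    static void rollback(void) { ring_entries--; }

    /* the helper that changed the ring state is the one that unwinds it */
    static int handle_desc(void)
    {
    	int ret = fill();

    	if (ret < 0)
    		rollback();
    	return ret;
    }

    int main(void)
    {
    	if (handle_desc() < 0)
    		printf("failed, entries=%d (already rolled back)\n",
    		       ring_entries);
    	return 0;
    }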
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct netdev_queue *dev_queue;
- int pre_ntu, next_to_use_head;
+ int pre_ntu, ret;
bool doorbell;
- int ret;
/* Hardware can only handle short frames above 32 bytes */
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return NETDEV_TX_OK;
}
@@ -2217,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out_err_tx_ok;
}
- next_to_use_head = ring->next_to_use;
-
- ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
- desc_cb);
- if (unlikely(ret < 0))
- goto fill_err;
-
- /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
- * zero, which is unlikely, and 'ret > 0' means how many tx desc
- * need to be notified to the hw.
- */
- ret = hns3_handle_desc_filling(ring, skb);
+ ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
if (unlikely(ret <= 0))
- goto fill_err;
+ goto out_err_tx_ok;
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
@@ -2252,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-fill_err:
- hns3_clear_desc(ring, next_to_use_head);
-
out_err_tx_ok:
dev_kfree_skb_any(skb);
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
@@ -3522,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
for (i = 0; i < cleand_count; i++) {
desc_cb = &ring->desc_cb[ring->next_to_use];
if (desc_cb->reuse_flag) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.reuse_pg_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, reuse_pg_cnt);
hns3_reuse_buffer(ring, ring->next_to_use);
} else {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
@@ -3544,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_reuse_pg++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, non_reuse_pg);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -3573,9 +3577,7 @@ static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
void *frag = napi_alloc_frag(frag_size);
if (unlikely(!frag)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, frag_alloc_err);
hns3_rl_err(ring_to_netdev(ring),
"failed to allocate rx frag\n");
@@ -3587,9 +3589,7 @@ static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
skb_add_rx_frag(skb, i, virt_to_page(frag),
offset_in_page(frag), frag_size, frag_size);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, frag_alloc);
return 0;
}
@@ -3722,9 +3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
return false;
- u64_stats_update_begin(&ring->syncp);
- ring->stats.csum_complete++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, csum_complete);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)csum);
@@ -3798,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.l3l4_csum_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, l3l4_csum_err);
return;
}
@@ -3891,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
skb = ring->skb;
if (unlikely(!skb)) {
hns3_rl_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -3925,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (ring->page_pool)
skb_mark_for_recycle(skb);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, seg_pkt_cnt);
ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
@@ -4135,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
bd_base_info, ol_info, csum);
if (unlikely(ret)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.rx_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, rx_err_cnt);
return ret;
}
@@ -4353,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
return rx_pkt_total;
}
-static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
- struct hnae3_ring_chain_node *head)
+static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node **head,
+ bool is_tx)
{
+ u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
+ u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
+ struct hnae3_ring_chain_node *cur_chain = *head;
struct pci_dev *pdev = tqp_vector->handle->pdev;
- struct hnae3_ring_chain_node *cur_chain = head;
struct hnae3_ring_chain_node *chain;
- struct hns3_enet_ring *tx_ring;
- struct hns3_enet_ring *rx_ring;
-
- tx_ring = tqp_vector->tx_group.ring;
- if (tx_ring) {
- cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
-
- cur_chain->next = NULL;
-
- while (tx_ring->next) {
- tx_ring = tx_ring->next;
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
- GFP_KERNEL);
- if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
-
- cur_chain = chain;
- }
- }
+ struct hns3_enet_ring *ring;
- rx_ring = tqp_vector->rx_group.ring;
- if (!tx_ring && rx_ring) {
- cur_chain->next = NULL;
- cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
- rx_ring = rx_ring->next;
+ if (cur_chain) {
+ while (cur_chain->next)
+ cur_chain = cur_chain->next;
}
- while (rx_ring) {
+ while (ring) {
chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = rx_ring->tqp->tqp_index;
+ return -ENOMEM;
+ if (cur_chain)
+ cur_chain->next = chain;
+ else
+ *head = chain;
+ chain->tqp_index = ring->tqp->tqp_index;
hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ bit_value);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, field_value);
cur_chain = chain;
- rx_ring = rx_ring->next;
+ ring = ring->next;
}
return 0;
+}
+
+static struct hnae3_ring_chain_node *
+hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *cur_chain = NULL;
+ struct hnae3_ring_chain_node *chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
+ goto err_free_chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
+ goto err_free_chain;
+
+ return cur_chain;
err_free_chain:
- cur_chain = head->next;
while (cur_chain) {
chain = cur_chain->next;
devm_kfree(&pdev->dev, cur_chain);
cur_chain = chain;
}
- head->next = NULL;
- return -ENOMEM;
+ return NULL;
}
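hns3_create_ring_chain() is called twice, TX rings first and RX rings second, so it seeks the current tail before appending. The append-with-tail-seek loop as a self-contained sketch (simplified node type, not the hnae3 structs):

    #include <stdlib.h>

    struct node { int tqp_index; struct node *next; };

    static int append_nodes(struct node **head, int count)
    {
    	struct node *tail = *head;

    	/* the second pass starts from an existing list: walk to its tail */
    	while (tail && tail->next)
    		tail = tail->next;

    	for (int i = 0; i < count; i++) {
    		struct node *n = calloc(1, sizeof(*n));

    		if (!n)
    			return -1;	/* caller frees the partial chain */
    		n->tqp_index = i;
    		if (tail)
    			tail->next = n;
    		else
    			*head = n;
    		tail = n;
    	}
    	return 0;
    }

    int main(void)
    {
    	struct node *head = NULL;

    	if (append_nodes(&head, 2) || append_nodes(&head, 2))
    		return 1;
    	while (head) {
    		struct node *next = head->next;

    		free(head);
    		head = next;
    	}
    	return 0;
    }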
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
@@ -4442,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
struct pci_dev *pdev = tqp_vector->handle->pdev;
struct hnae3_ring_chain_node *chain_tmp, *chain;
- chain = head->next;
+ chain = head;
while (chain) {
chain_tmp = chain->next;
@@ -4557,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
}
for (i = 0; i < priv->vector_num; i++) {
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
tqp_vector = &priv->tqp_vector[i];
@@ -4567,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->tx_group.total_packets = 0;
tqp_vector->handle = h;
- ret = hns3_get_vector_ring_chain(tqp_vector,
- &vector_ring_chain);
- if (ret)
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain) {
+ ret = -ENOMEM;
goto map_ring_fail;
+ }
ret = h->ae_algo->ops->map_ring_to_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
if (ret)
goto map_ring_fail;
@@ -4674,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int i;
@@ -4689,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
* chain between vector and ring, we should go on to deal with
* the remaining options.
*/
- if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain)
dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
@@ -5347,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 808405cc0280..a05a0c7423ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -10,6 +10,9 @@
#include "hnae3.h"
+struct iphdr;
+struct ipv6hdr;
+
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -660,6 +663,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_buf_size(_ring) ((_ring)->buf_size)
+#define hns3_ring_stats_update(ring, cnt) do { \
+ typeof(ring) (tmp) = (ring); \
+ u64_stats_update_begin(&(tmp)->syncp); \
+ ((tmp)->stats.cnt)++; \
+ u64_stats_update_end(&(tmp)->syncp); \
+} while (0)
+
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
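The typeof/tmp dance in hns3_ring_stats_update() makes the macro evaluate its ring argument exactly once, so an argument with side effects stays safe across the begin/end pair. A userspace reduction with the seqcount calls stubbed out (GNU C, as in the kernel):

    #include <stdio.h>

    struct ring { struct { long tx_copy; } stats; };

    #define stats_begin(r) ((void)(r))	/* stand-ins for u64_stats_update_* */
    #define stats_end(r)   ((void)(r))

    #define ring_stats_update(ring, cnt) do {	\
    	typeof(ring) tmp = (ring);		\
    	stats_begin(tmp);			\
    	tmp->stats.cnt++;			\
    	stats_end(tmp);				\
    } while (0)

    int main(void)
    {
    	struct ring rings[2] = { { { 0 } }, { { 0 } } };
    	int i = 0;

    	ring_stats_update(&rings[i++], tx_copy);  /* 'ring' evaluated once */
    	printf("i=%d tx_copy=%ld\n", i, rings[0].stats.tx_copy);
    	return 0;
    }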
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 65168125c42e..c287be8bc48d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -77,6 +77,10 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
+/* Make sure: len(name) + interval >= maxlen(item data) + 2.
+ * For example, for name = "pkt_num" (len: 7) whose item data is a u32
+ * printed as "%u" (maxlen: 10), the interval should be at least 5.
+ */
static void hclge_dbg_fill_content(char *content, u16 len,
const struct hclge_dbg_item *items,
const char **result, u16 size)
@@ -99,7 +103,7 @@ static void hclge_dbg_fill_content(char *content, u16 len,
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
- sprintf(buf, "vf%u", id - 1);
+ sprintf(buf, "vf%u", id - 1U);
else
sprintf(buf, "pf");
@@ -778,7 +782,6 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
-
if (!data_str)
return -ENOMEM;
@@ -1764,7 +1767,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int *len, int *pos)
+ char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -1776,7 +1779,7 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, *len - *pos,
+ *pos += scnprintf(buf + *pos, len - *pos,
"0x%04x | 0x%08x\n", offset,
le32_to_cpu(desc[i].data[j]));
@@ -1814,7 +1817,7 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
+ hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
}
return 0;
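The hclge_ncl_config_data_print() signature change works because only the write position mutates across calls; the buffer length is a fixed bound, so it can be passed by value. The pos/len convention modeled with snprintf (the kernel's scnprintf differs only in returning the count actually written):

    #include <stdio.h>

    /* append formatted output at *pos, bounded by the fixed buffer len */
    static void print_entry(char *buf, int len, int *pos, int off, unsigned int val)
    {
    	int n = snprintf(buf + *pos, len - *pos, "0x%04x | 0x%08x\n", off, val);

    	if (n > 0 && n < len - *pos)	/* scnprintf would clamp for us */
    		*pos += n;
    }

    int main(void)
    {
    	char buf[128];
    	int pos = 0;

    	print_entry(buf, sizeof(buf), &pos, 0x0, 0xdeadbeef);
    	print_entry(buf, sizeof(buf), &pos, 0x4, 0xcafef00d);
    	fputs(buf, stdout);
    	return 0;
    }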
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 1815fcf168b0..1d1c4514aac2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1613,12 +1613,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+		dev_warn(&hdev->pdev->dev, "invalid tc_max %u, resetting to 1\n",
+			 hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+	/* Non-contiguous TCs are currently not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- unsigned int i;
int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
@@ -1662,29 +1689,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
- if ((hdev->tc_max > HNAE3_MAX_TC) ||
- (hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
- hdev->tc_max);
- hdev->tc_max = 1;
- }
-
- /* Dev does not support DCB */
- if (!hnae3_dev_dcb_supported(hdev)) {
- hdev->tc_max = 1;
- hdev->pfc_max = 0;
- } else {
- hdev->pfc_max = hdev->tc_max;
- }
-
- hdev->tm_info.num_tc = 1;
-
- /* Currently not support uncontiuous tc */
- for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-
+ hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
/* Set the affinity based on numa node */
@@ -1885,7 +1890,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
u16 i, num_vport;
num_vport = hdev->num_req_vfs + 1;
- for (i = 0; i < num_vport; i++) {
+ for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
@@ -6801,7 +6806,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) should be less than %u\n",
- vf - 1, hdev->num_req_vfs);
+ vf - 1U, hdev->num_req_vfs);
return -EINVAL;
}
@@ -6811,7 +6816,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
+ ring, tqps - 1U);
return -EINVAL;
}
@@ -7172,6 +7177,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
}
}
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
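hclge_get_fd_rule() leans on fd_rule_list being kept sorted by location, returning NULL as soon as it walks past the target. The same early-exit lookup over a sorted array, as a stand-in for the hlist walk:

    #include <stddef.h>
    #include <stdio.h>

    struct rule { unsigned short location; };

    static const struct rule *find_rule(const struct rule *rules, size_t n,
    				    unsigned short location)
    {
    	for (size_t i = 0; i < n; i++) {
    		if (rules[i].location == location)
    			return &rules[i];
    		if (rules[i].location > location)	/* sorted: give up early */
    			return NULL;
    	}
    	return NULL;
    }

    int main(void)
    {
    	const struct rule rules[] = { {2}, {5}, {9} };

    	printf("%s\n", find_rule(rules, 3, 5) ? "found" : "missing");
    	return 0;
    }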
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+}
+
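hclge_fd_get_ring_cookie() packs the queue id into the low bits of fs->ring_cookie and the VF id above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. A worked encode/decode assuming the usual ethtool shift of 32:

    #include <assert.h>
    #include <stdint.h>

    #define RING_VF_OFF 32	/* assumed ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF */

    static uint64_t make_cookie(uint32_t queue_id, uint32_t vf_id)
    {
    	return (uint64_t)queue_id | ((uint64_t)vf_id << RING_VF_OFF);
    }

    int main(void)
    {
    	uint64_t c = make_cookie(7, 3);

    	assert((uint32_t)c == 7);		/* queue id in the low word */
    	assert((c >> RING_VF_OFF) == 3);	/* vf id above bit 32 */
    	return 0;
    }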
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -7179,7 +7215,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_fd_rule *rule = NULL;
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- struct hlist_node *node2;
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
@@ -7188,14 +7223,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= fs->location)
- break;
- }
-
- if (!rule || fs->location != rule->location) {
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOENT;
}
@@ -7233,16 +7263,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
hclge_fd_get_ext_info(fs, rule);
- if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
- fs->ring_cookie = RX_CLS_FLOW_DISC;
- } else {
- u64 vf_id;
-
- fs->ring_cookie = rule->queue_id;
- vf_id = rule->vf_id;
- vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
- fs->ring_cookie |= vf_id;
- }
+ hclge_fd_get_ring_cookie(fs, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -7979,16 +8000,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
- enum hnae3_loop loop_mode)
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
-#define HCLGE_COMMON_LB_RETRY_MS 10
-#define HCLGE_COMMON_LB_RETRY_NUM 100
-
struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
- int ret, i = 0;
u8 loop_mode_b;
+ int ret;
req = (struct hclge_common_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
@@ -8005,23 +8023,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported common loopback mode %d\n", loop_mode);
+ "unsupported loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
- if (en) {
+ req->mask = loop_mode_b;
+ if (en)
req->enable = loop_mode_b;
- req->mask = loop_mode_b;
- } else {
- req->mask = loop_mode_b;
- }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "common loopback set fail, ret = %d\n", ret);
- return ret;
- }
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
do {
msleep(HCLGE_COMMON_LB_RETRY_MS);
@@ -8030,20 +8059,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "common loopback get, ret = %d\n", ret);
+ "failed to get loopback done status, ret = %d\n",
+ ret);
return ret;
}
} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
!(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
+		dev_err(&hdev->pdev->dev, "wait for loopback done timed out\n");
return -EBUSY;
} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
return -EIO;
}
- return ret;
+
+ return 0;
+}
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
}
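Splitting hclge_cfg_common_loopback() into a one-shot command send plus a bounded poll keeps each half simple: the wait side just re-queries until the DONE bit shows or the retry budget runs out. The retry skeleton in portable C, with usleep and a fake status standing in for msleep and the firmware query:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define RETRY_MS  10
    #define RETRY_NUM 100

    static int polls;
    static bool hw_done(void) { return ++polls >= 3; }	/* fake firmware status */

    static int wait_done(void)
    {
    	bool done = false;
    	int i = 0;

    	do {
    		usleep(RETRY_MS * 1000);	/* msleep() in the driver */
    		done = hw_done();		/* re-query the status */
    	} while (++i < RETRY_NUM && !done);

    	return done ? 0 : -1;			/* -EBUSY on timeout upstream */
    }

    int main(void)
    {
    	printf("wait_done=%d after %d polls\n", wait_done(), polls);
    	return 0;
    }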
static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
@@ -9703,8 +9746,8 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to get vlan filter config, ret = %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
return ret;
}
@@ -9715,8 +9758,8 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
- ret);
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
return ret;
}
@@ -9962,6 +10005,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
return ret;
}
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
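hclge_need_update_port_vlan() concentrates the bitmap bookkeeping: a duplicate add or a stale delete now just warns and reports "no update needed" instead of failing with -EINVAL. The same logic reduced to a plain bitmask (toy table, one word per vlan, not the kernel's test_and_set_bit API):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long vlan_table[4096];	/* bit per vport, word per vlan */

    static bool need_update(unsigned int vport, unsigned short vlan, bool is_kill)
    {
    	unsigned long bit = 1ul << vport;

    	if (!is_kill && (vlan_table[vlan] & bit)) {
    		printf("vport %u already in vlan %u\n", vport, vlan);
    		return false;		/* duplicate add: skip, no error */
    	}
    	if (is_kill && !(vlan_table[vlan] & bit)) {
    		printf("vport %u not in vlan %u\n", vport, vlan);
    		return false;		/* stale delete: skip, no error */
    	}
    	if (is_kill)
    		vlan_table[vlan] &= ~bit;
    	else
    		vlan_table[vlan] |= bit;
    	return true;
    }

    int main(void)
    {
    	printf("add: %d\n", need_update(1, 10, false));
    	printf("dup: %d\n", need_update(1, 10, false));
    	printf("del: %d\n", need_update(1, 10, true));
    	return 0;
    }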
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id,
bool is_kill)
@@ -9983,26 +10052,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
- /* vlan 0 may be added twice when 8021q module is enabled */
- if (!is_kill && !vlan_id &&
- test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
return 0;
- if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %u is already in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
- if (is_kill &&
- !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %u is not in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
@@ -10194,67 +10246,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
return status;
}
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
-#define HCLGE_DEF_VLAN_TYPE 0x8100
-
- struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* for revision 0x21, vf vlan filter is per function */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- ret = hclge_set_vlan_filter_ctrl(hdev,
- HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS,
- true,
- vport->vport_id);
- if (ret)
- return ret;
- vport->cur_vlan_fltr_en = true;
- }
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true,
- 0);
- if (ret)
- return ret;
- } else {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B,
- true, 0);
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
- hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
- ret = hclge_set_vlan_protocol_type(hdev);
- if (ret)
- return ret;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- u16 vlan_tag;
- u8 qos;
+ return hclge_set_vlan_protocol_type(hdev);
+}
+
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ cfg = &vport->port_base_vlan_cfg;
- ret = hclge_vlan_offload_cfg(vport,
- vport->port_base_vlan_cfg.state,
- vlan_tag, qos);
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
if (ret)
return ret;
}
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -10511,12 +10576,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
return false;
}
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
@@ -10529,38 +10623,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
goto out;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- if (old_vlan_info->vlan_tag == 0)
- ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- true, 0);
- else
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to clear vport%u port base vlan %u, ret = %d.\n",
- vport->vport_id, old_vlan_info->vlan_tag, ret);
- return ret;
- }
-
- goto out;
- }
-
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
@@ -12310,19 +12378,42 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->pf_rss_size_max;
}
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+}
+
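hclge_set_rss_tc_mode_cfg() stores each TC's size as log2(roundup_pow_of_two(rss_size)) and its offset as rss_size * tc; for rss_size = 40 that is 2^6 = 64 queues of room per TC, with TC 2 starting at queue 80. The same arithmetic in plain C (local helpers mimic the kernel's roundup_pow_of_two/ilog2):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int x)
    {
    	unsigned int r = 1;

    	while (r < x)
    		r <<= 1;
    	return r;
    }

    static unsigned int ilog2(unsigned int x)	/* x assumed a power of two */
    {
    	unsigned int n = 0;

    	while (x >>= 1)
    		n++;
    	return n;
    }

    int main(void)
    {
    	unsigned int rss_size = 40;
    	unsigned int sz = ilog2(roundup_pow_of_two(rss_size));

    	for (int tc = 0; tc < 3; tc++)
    		printf("tc %d: size=2^%u offset=%u\n", tc, sz, rss_size * tc);
    	return 0;
    }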
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
- u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 roundup_size;
u32 *rss_indir;
unsigned int i;
int ret;
@@ -12335,20 +12426,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
return ret;
}
- roundup_size = roundup_pow_of_two(kinfo->rss_size);
- roundup_size = ilog2(roundup_size);
- /* Set the RSS TC mode according to the new RSS size */
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- tc_valid[i] = 0;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = kinfo->rss_size * i;
- }
- ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ ret = hclge_set_rss_tc_mode_cfg(handle);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index c495df2e5953..8b3954b39147 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -181,7 +181,7 @@ static int hclge_get_ring_chain_from_mbx(
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
req->msg.param[i].tqp_index,
- vport->nic.kinfo.rss_size - 1);
+ vport->nic.kinfo.rss_size - 1U);
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index fd0e20190b90..4200d0b6d931 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -4,6 +4,10 @@
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
+#include "hnae3.h"
+
+struct hclge_dev;
+
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 7a9b77de632a..bbee74cd8404 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -8,6 +8,9 @@
#include <linux/net_tstamp.h>
#include <linux/types.h>
+struct hclge_dev;
+struct ifreq;
+
#define HCLGE_PTP_REG_OFFSET 0x29000
#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 1db7f40b4525..619cc30a2dfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -6,6 +6,12 @@
#include <linux/types.h>
+#include "hnae3.h"
+
+struct hclge_dev;
+struct hclge_vport;
+enum hclge_opcode_type;
+
/* MAC Pause */
#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 06586173add7..998717f02136 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
{
struct hinic_hwif *hwif = attr->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
chain->hwif = hwif;
chain->chain_type = attr->chain_type;
@@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
sema_init(&chain->sem, 1);
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
if (!chain->cell_ctxt)
return -ENOMEM;
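The devm_kzalloc(n * size) to devm_kcalloc(n, size) conversions throughout this hinic series are not just cosmetic: kcalloc rejects a multiplication that would overflow instead of quietly allocating a short buffer. The guard is essentially this, with calloc as the stand-in allocator:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* zeroed array allocation with an explicit overflow check, as
     * kcalloc/devm_kcalloc do internally */
    static void *xcalloc(size_t n, size_t size)
    {
    	if (size && n > SIZE_MAX / size)
    		return NULL;		/* n * size would wrap */
    	return calloc(n, size);
    }

    int main(void)
    {
    	/* a wrapping request that kzalloc(n * size) would silently shrink */
    	printf("huge request -> %s\n",
    	       xcalloc(SIZE_MAX / 2, 4) ? "allocated" : "rejected");
    	return 0;
    }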
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 307a6d4af993..a627237f694b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
struct hinic_cmdq_ctxt *cmdq_ctxts;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
int err;
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
if (!cmdq_ctxts)
return -ENOMEM;
@@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
struct pci_dev *pdev = hwif->pdev;
struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
u16 max_wqe_size;
int err;
@@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
if (!cmdqs->cmdq_buf_pool)
return -ENOMEM;
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
if (!cmdqs->saved_wqs) {
err = -ENOMEM;
goto err_saved_wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 657a15447bd0..2127a48749a8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
int i, err;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
@@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev)
if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
GFP_KERNEL);
if (!hwdev->msix_entries)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index d3fc05a07fdb..045c47786a04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq)
struct hinic_hwif *hwif = eq->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 init_val, addr, val;
- size_t addr_size;
int err, pg;
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
if (!eq->dma_addr)
return -ENOMEM;
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
if (!eq->virt_addr) {
err = -ENOMEM;
goto err_virt_addr_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index a6e43d686293..c4a0ba6e183a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -375,31 +375,30 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
{
struct hinic_hwif *hwif = func_to_io->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
void *ci_addr_base;
int i, j, err;
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
+ func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->qps), GFP_KERNEL);
if (!func_to_io->qps)
return -ENOMEM;
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_wq), GFP_KERNEL);
if (!func_to_io->sq_wq) {
err = -ENOMEM;
goto err_sq_wq;
}
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->rq_wq), GFP_KERNEL);
if (!func_to_io->rq_wq) {
err = -ENOMEM;
goto err_rq_wq;
}
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
+ func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_db), GFP_KERNEL);
if (!func_to_io->sq_db) {
err = -ENOMEM;
goto err_sq_db;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 7f0f1aa3cedd..2d9b06d7caad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -193,20 +193,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
@@ -379,15 +379,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(wq->prod_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index f9a766b8ac43..1e1b1be86174 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -144,13 +144,12 @@ static int create_txqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
if (nic_dev->txqs)
return -EINVAL;
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -241,13 +240,12 @@ static int create_rxqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
if (nic_dev->rxqs)
return -EINVAL;
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
if (!nic_dev->rxqs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index a78c398bf5b2..01e7d3c0b68e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/module.h>
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5bdb0d374ef..a984a7a6dd2e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -862,7 +862,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
int err, irqname_len;
- size_t sges_size;
txq->netdev = netdev;
txq->sq = sq;
@@ -871,13 +870,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ad9a2819e202..59536bd5cab1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
- rc = -1;
+ rc = -EIO;
goto out;
}
rc = 0;
@@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
dma_addr_t stok;
+ int rc;
stok = dma_map_single(dev, &adapter->stats,
sizeof(struct ibmvnic_statistics),
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, stok)) {
- dev_err(dev, "Couldn't map stats buffer\n");
- return -1;
+ rc = dma_mapping_error(dev, stok);
+ if (rc) {
+ dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
+ return rc;
}
adapter->stats_token = stok;
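The ibmvnic half of this patch swaps bare return -1 for the errno that actually occurred; here dma_mapping_error() already yields -ENOMEM or 0, so init_stats_token() can forward its result directly. The propagation pattern in miniature (map_buffer is a hypothetical stand-in):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int map_buffer(int fail)	/* stand-in for dma_map_single + check */
    {
    	return fail ? -ENOMEM : 0;
    }

    static int init_token(int fail)
    {
    	int rc = map_buffer(fail);

    	if (rc) {
    		fprintf(stderr, "couldn't map buffer, rc = %d\n", rc);
    		return rc;	/* propagate the real errno, not -1 */
    	}
    	return 0;
    }

    int main(void)
    {
    	printf("%s\n", strerror(-init_token(1)));
    	return 0;
    }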
@@ -628,17 +630,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
old_buff_size = adapter->prev_rx_buf_sz;
new_buff_size = adapter->cur_rx_buf_sz;
- /* Require buff size to be exactly same for now */
- if (old_buff_size != new_buff_size)
- return false;
-
- if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
- return true;
-
- if (old_num_pools < adapter->min_rx_queues ||
- old_num_pools > adapter->max_rx_queues ||
- old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
- old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+ if (old_buff_size != new_buff_size ||
+ old_num_pools != new_num_pools ||
+ old_pool_size != new_pool_size)
return false;
return true;
@@ -663,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev)
u64 num_pools;
u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
- int i, j;
+ int i, j, rc;
pool_size = adapter->req_rx_add_entries_per_subcrq;
num_pools = adapter->req_rx_queues;
@@ -682,7 +676,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!adapter->rx_pool) {
dev_err(dev, "Failed to allocate rx pools\n");
- return -1;
+ return -ENOMEM;
}
/* Set num_active_rx_pools early. If we fail below after partial
@@ -705,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->free_map) {
dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ rc = -ENOMEM;
goto out_release;
}
@@ -713,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
+ rc = -ENOMEM;
goto out_release;
}
}
@@ -726,8 +722,9 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size))
+ rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size);
+ if (rc)
goto out;
for (j = 0; j < rx_pool->size; ++j) {
@@ -764,7 +761,7 @@ out:
/* We failed to allocate one or more LTBs or map them on the VIOS.
* Hold onto the pools and any LTBs that we did allocate/map.
*/
- return -1;
+ return rc;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -825,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
- return -1;
+ return -ENOMEM;
tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map) {
kfree(tx_pool->tx_buff);
tx_pool->tx_buff = NULL;
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < pool_size; i++)
@@ -874,17 +871,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
old_mtu = adapter->prev_mtu;
new_mtu = adapter->req_mtu;
- /* Require MTU to be exactly same to reuse pools for now */
- if (old_mtu != new_mtu)
- return false;
-
- if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
- return true;
-
- if (old_num_pools < adapter->min_tx_queues ||
- old_num_pools > adapter->max_tx_queues ||
- old_pool_size < adapter->min_tx_entries_per_subcrq ||
- old_pool_size > adapter->max_tx_entries_per_subcrq)
+ if (old_mtu != new_mtu ||
+ old_num_pools != new_num_pools ||
+ old_pool_size != new_pool_size)
return false;
return true;
@@ -930,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
- return -1;
+ return -ENOMEM;
adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
@@ -940,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
- return -1;
+ return -ENOMEM;
}
/* Set num_active_tx_pools early. If we fail below after partial
@@ -1129,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev)
retry = false;
if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
- return -1;
+ return -EACCES;
}
adapter->init_done_rc = 0;
@@ -1170,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev)
timeout)) {
netdev_warn(netdev,
"Capabilities query timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
rc = init_sub_crqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ initialization failed\n");
- return -1;
+ return rc;
}
rc = init_sub_crq_irqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ irq initialization failed\n");
- return -1;
+ return rc;
}
} else if (adapter->init_done_rc) {
- netdev_warn(netdev, "Adapter login failed\n");
- return -1;
+ netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
+ adapter->init_done_rc);
+ return -EIO;
}
} while (retry);
@@ -1247,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_err(netdev, "timeout setting link state\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc == PARTIALSUCCESS) {
@@ -2304,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* If someone else changed the adapter state
* when we dropped the rtnl, fail the reset
*/
- rc = -1;
+ rc = -EAGAIN;
goto out;
}
adapter->state = VNIC_CLOSED;
@@ -2346,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
+ if (rc)
goto out;
- }
/* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
@@ -3779,7 +3767,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
- return -1;
+ return -ENOMEM;
for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter);
@@ -3848,7 +3836,7 @@ tx_failed:
for (i = 0; i < registered_queues; i++)
release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
- return -1;
+ return -ENOMEM;
}
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
@@ -4207,7 +4195,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq) {
netdev_err(adapter->netdev,
"RX or TX queues are not allocated, device login failed\n");
- return -1;
+ return -ENOMEM;
}
release_login_buffer(adapter);
@@ -4327,7 +4315,7 @@ buf_map_failed:
kfree(login_buffer);
adapter->login_buf = NULL;
buf_alloc_failed:
- return -1;
+ return -ENOMEM;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
@@ -5648,7 +5636,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc) {
@@ -5659,7 +5647,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
- return -1;
+ return -EINVAL;
}
if (reset &&
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index b8e42f67d897..4a8f36e0ab07 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -18,8 +18,6 @@
#define IBMVNIC_NAME "ibmvnic"
#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
-#define IBMVNIC_STATS_TIMEOUT 1
-#define IBMVNIC_INIT_FAILED 2
#define IBMVNIC_OPEN_FAILED 3
/* basic structures plus 100 2k buffers */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 44e2dc8328a2..635a95927e93 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3614,10 +3614,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
- /* flags reserved for future extensions - must be zero */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 291e61ac3e44..2c1b1da1220e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
+ if (vsi->type != I40E_VSI_MAIN &&
+ vsi->type != I40E_VSI_FDIR &&
+ vsi->type != I40E_VSI_VMDQ2) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d type %d descriptor rings not available\n",
+ vsi_seid, vsi->type);
+ return;
+ }
if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 09b1d5aed1c9..61e5789d78db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1205,10 +1205,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
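
The two removed hunks above (e1000e and i40e) delete the same per-driver check of the reserved hwtstamp config flags. This is consistent with validating the flags once in the common SIOCSHWTSTAMP path instead of in every driver; a minimal sketch of such a centralized check, compiled against the UAPI header (the function name and its placement are assumptions, not shown in this diff):

#include <errno.h>
#include <linux/net_tstamp.h>

/* Sketch only: one core-level check replaces the per-driver copies
 * removed above. Name and placement are assumptions.
 */
static int hwtstamp_flags_ok(const struct hwtstamp_config *cfg)
{
	/* flags reserved for future extensions - must be zero */
	return cfg->flags ? -EINVAL : 0;
}

int main(void)
{
	struct hwtstamp_config cfg = { .flags = 0 };

	return hwtstamp_flags_ok(&cfg) ? 1 : 0;
}
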
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 80ae264c99ba..2ea4deb8fc44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1949,6 +1949,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message handler to synchronize servicing of the
+ * message with a potential VF reset
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+ int i;
+
+ /* Some messages can only be handled while the given VF state bit
+ * is set. That bit may be cleared during a VF reset, so wait for
+ * the reset to finish before handling the request message.
+ */
+ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+ if (test_bit(state, &vf->vf_states))
+ return true;
+ usleep_range(10000, 20000);
+ }
+
+ return test_bit(state, &vf->vf_states);
+}
+
+/**
* i40e_vc_get_version_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2008,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2131,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -2219,7 +2245,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2368,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2540,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2590,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
u8 cur_pairs = vf->num_queue_pairs;
struct i40e_pf *pf = vf->pf;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
return -EINVAL;
if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2635,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
memset(&stats, 0, sizeof(struct i40e_eth_stats));
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2752,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2824,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2968,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3088,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi = NULL;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
- (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3119,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u16 i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
- (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3154,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int len = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3190,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3215,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3241,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3468,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3599,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -3708,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u64 speed = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3797,11 +3823,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
/* set this flag only after making sure all inputs are sane */
vf->adq_enabled = true;
- /* num_req_queues is set when user changes number of queues via ethtool
- * and this causes issue for default VSI(which depends on this variable)
- * when ADq is enabled, hence reset it.
- */
- vf->num_req_queues = 0;
/* reset the VF in order to allocate resources */
i40e_vc_reset_vf(vf, true);
@@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
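
The new I40E_VF_STATE_WAIT_COUNT of 20 (defined in the header hunk below), combined with the usleep_range(10000, 20000) in i40e_sync_vf_state() above, bounds how long a VF message handler will wait for a reset to finish. A small standalone calculation of that bound:

#include <stdio.h>

#define I40E_VF_STATE_WAIT_COUNT 20	/* from the header hunk below */

int main(void)
{
	/* usleep_range(10000, 20000) sleeps 10-20 ms per iteration */
	printf("wait bound: %d-%d ms\n",
	       I40E_VF_STATE_WAIT_COUNT * 10, I40E_VF_STATE_WAIT_COUNT * 20);
	return 0;
}
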
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 091e32c1bb46..49575a640a84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -18,6 +18,8 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
+#define I40E_VF_STATE_WAIT_COUNT 20
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 3789269ce741..b5728bdbcf33 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -137,9 +137,13 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
- bool is_new_mac; /* filter is new, wait for PF decision */
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_mac:1; /* filter is new, wait for PF decision */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 is_primary:1; /* filter is a default VF MAC */
+ u8 padding:4;
+ };
};
struct iavf_vlan_filter {
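
The iavf_mac_filter change above packs what used to be three one-byte bools (plus the new is_primary flag) into a single byte of bitfields. A standalone comparison of the two layouts (struct names here are illustrative):

#include <stdio.h>

struct filter_bools {			/* old layout: one byte per flag */
	_Bool is_new_mac, remove, add;
};

struct filter_bits {			/* new layout: one byte total */
	unsigned char is_new_mac:1, remove:1, add:1, is_primary:1,
		      padding:4;
};

int main(void)
{
	printf("bools: %zu byte(s), bitfields: %zu byte(s)\n",
	       sizeof(struct filter_bools), sizeof(struct filter_bits));
	return 0;
}
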
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 27c7b36427d2..3bb56714beb0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
**/
static int iavf_get_sset_count(struct net_device *netdev, int sset)
{
+ /* Report the maximum number of queues, even if not every queue is
+ * currently configured. Since allocation of queues is in pairs,
+ * use netdev->real_num_tx_queues * 2. real_num_tx_queues is set
+ * at device creation and never changes.
+ */
+
if (sset == ETH_SS_STATS)
return IAVF_STATS_LEN +
- (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
+ (IAVF_QUEUE_STATS_LEN * 2 *
+ netdev->real_num_tx_queues);
else if (sset == ETH_SS_PRIV_FLAGS)
return IAVF_PRIV_FLAGS_STR_LEN;
else
@@ -360,17 +367,18 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
- for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+ /* As num_active_queues describes both tx and rx queues, we can
+ * use it to iterate over rings' stats.
+ */
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct iavf_ring *ring;
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->tx_rings[i] : NULL);
+ /* Tx rings stats */
+ ring = &adapter->tx_rings[i];
iavf_add_queue_stats(&data, ring);
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->rx_rings[i] : NULL);
+ /* Rx rings stats */
+ ring = &adapter->rx_rings[i];
iavf_add_queue_stats(&data, ring);
}
rcu_read_unlock();
@@ -407,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
iavf_add_stat_strings(&data, iavf_gstrings_stats);
- /* Queues are always allocated in pairs, so we just use num_tx_queues
- * for both Tx and Rx queues.
+ /* Queues are always allocated in pairs, so we just use
+ * real_num_tx_queues for both Tx and Rx queues.
*/
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"tx", i);
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -623,23 +631,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- IAVF_MIN_TXD,
- IAVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > IAVF_MAX_TXD ||
+ ring->tx_pending < IAVF_MIN_TXD ||
+ ring->rx_pending > IAVF_MAX_RXD ||
+ ring->rx_pending < IAVF_MIN_RXD) {
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+ ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+ IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ return -EINVAL;
+ }
+
+ new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_tx_count != ring->tx_pending)
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+ new_tx_count);
- new_rx_count = clamp_t(u32, ring->rx_pending,
- IAVF_MIN_RXD,
- IAVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_rx_count != ring->rx_pending)
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+ new_rx_count);
/* if nothing to do return success */
if ((new_tx_count == adapter->tx_desc_count) &&
- (new_rx_count == adapter->rx_desc_count))
+ (new_rx_count == adapter->rx_desc_count)) {
+ netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
+ }
- adapter->tx_desc_count = new_tx_count;
- adapter->rx_desc_count = new_rx_count;
+ if (new_tx_count != adapter->tx_desc_count) {
+ netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+ adapter->tx_desc_count, new_tx_count);
+ adapter->tx_desc_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_desc_count) {
+ netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+ adapter->rx_desc_count, new_rx_count);
+ adapter->rx_desc_count = new_rx_count;
+ }
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
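
The validation above rejects out-of-range requests outright, then rounds in-range requests up with ALIGN(). Assuming IAVF_REQ_DESCRIPTOR_MULTIPLE is 32 (its value is not shown in this diff), a standalone illustration of the rounding:

#include <stdio.h>

/* Same round-up the driver applies; the multiple of 32 is an assumed
 * value for IAVF_REQ_DESCRIPTOR_MULTIPLE, not taken from this diff.
 */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	printf("500 -> %u\n", ALIGN_UP(500u, 32u));	/* 512 */
	printf("512 -> %u\n", ALIGN_UP(512u, 32u));	/* 512 */
	return 0;
}
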
@@ -1910,7 +1939,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @key: hash key
* @hfunc: hash function to use
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -1919,19 +1948,21 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Only the Toeplitz hash function is supported */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+
+ if (!key && !indir)
return 0;
if (key)
memcpy(adapter->rss_key, key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ if (indir) {
+ /* Each 32-bit value pointed to by 'indir' is stored in a LUT entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
+ }
return iavf_config_rss(adapter);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 14934a7a13ef..3a820b3ff466 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -463,14 +463,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-TxRx-%d", basename, rx_int_idx++);
+ "iavf-%s-TxRx-%u", basename, rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-rx-%d", basename, rx_int_idx++);
+ "iavf-%s-rx-%u", basename, rx_int_idx++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-tx-%d", basename, tx_int_idx++);
+ "iavf-%s-tx-%u", basename, tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
@@ -2248,6 +2248,7 @@ static void iavf_reset_task(struct work_struct *work)
}
pci_set_master(adapter->pdev);
+ pci_restore_msi_state(adapter->pdev);
if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
@@ -2910,7 +2911,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2920,7 +2921,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2955,7 +2956,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
match.mask->vlan_id);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
@@ -2979,7 +2980,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2989,13 +2990,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
@@ -3016,7 +3017,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
/* src and dest IPv6 address should not be LOOPBACK
@@ -3026,7 +3027,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -3051,7 +3052,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(match.mask->src));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -3061,7 +3062,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (match.key->dst) {
@@ -3428,6 +3429,8 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (CLIENT_ENABLED(adapter)) {
iavf_notify_client_l2_params(&adapter->vsi);
@@ -3998,6 +4001,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
+ dev_info(&adapter->pdev->dev, "Removing device\n");
/* Shut down all the garbage mashers on the detention level */
iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 3525eab8e9f9..42c9f9dc235c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1766,7 +1766,7 @@ tx_only:
if (likely(napi_complete_done(napi, work_done)))
iavf_update_enable_itr(vsi, q_vector);
- return min(work_done, budget - 1);
+ return min_t(int, work_done, budget - 1);
}
/**
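
min_t(int, ...) forces both operands to a single signed type before comparing. A standalone illustration of the pitfall it avoids when operand types differ and budget - 1 can be negative (the types here are chosen to show the effect, not copied from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int work_done = 1;
	int budget = 0;				/* budget - 1 == -1 */

	/* Mixed signedness: -1 converts to UINT_MAX, so the "minimum"
	 * comes out as work_done instead of -1.
	 */
	unsigned int bad = work_done < budget - 1 ? work_done : budget - 1;

	/* Comparing in one signed type gives the expected result. */
	int good = (int)work_done < budget - 1 ? (int)work_done : budget - 1;

	printf("mixed: %u, int: %d\n", bad, good);
	return 0;
}
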
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index d60bf7c21200..cfa1f0e0e2fe 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -762,15 +762,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+ adapter->netdev->name);
}
if (!flags) {
- adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
- IAVF_FLAG_ALLMULTI_ON);
- adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
- IAVF_FLAG_AQ_RELEASE_ALLMULTI);
- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+ adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
+ if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+ adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+ adapter->netdev->name);
+ }
}
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -1017,7 +1025,7 @@ print_link_msg:
} else if (link_speed_mbps == SPEED_UNKNOWN) {
snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
} else {
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
link_speed_mbps, "Mbps");
}
@@ -1522,7 +1530,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
- dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b67ad51cbcc9..6be7bc87f70c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -848,7 +848,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
-const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1efc635cc0f5..44bdd0ed1629 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -759,7 +759,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u16 pf_q;
u8 tc;
@@ -804,9 +804,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ring->q_handle, 1, qg_buf, buf_len,
NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return status;
}
/* Add Tx Queue TEID into the VSI Tx ring from the
@@ -929,7 +929,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u32 val;
/* clear cause_ena bit for disabled queues */
@@ -953,18 +953,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * active reset flow, -EBUSY is returned.
* This is not an error as the reset operation disables
* queues at the hardware level anyway.
*/
- if (status == ICE_ERR_RESET_ONGOING) {
+ if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ } else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ return status;
}
return 0;
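
With ice_status gone from these paths, status values are plain negative errnos, so generic reporting replaces ice_stat_str(). A minimal userspace illustration (strerror() stands in for kernel-side printing of the numeric error):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int status = -EBUSY;	/* e.g. disable-queue during an active reset */

	if (status)
		printf("LAN Tx queue disable: %d (%s)\n",
		       status, strerror(-status));
	return 0;
}
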
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b3066d0fea8b..cc8297e1db3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@@ -16,10 +15,10 @@
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the HW structure.
*/
-static enum ice_status ice_set_mac_type(struct ice_hw *hw)
+static int ice_set_mac_type(struct ice_hw *hw)
{
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
- return ICE_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
switch (hw->device_id) {
case ICE_DEV_ID_E810C_BACKPLANE:
@@ -99,7 +98,7 @@ bool ice_is_e810t(struct ice_hw *hw)
* Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
* configuration, flow director filters, etc.).
*/
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+int ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -123,21 +122,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* ice_discover_dev_caps is expected to be called before this function is
* called.
*/
-static enum ice_status
+static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 flags;
u8 i;
cmd = &desc.params.mac_read;
if (buf_size < sizeof(*resp))
- return ICE_ERR_BUF_TOO_SHORT;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
@@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* A single port can report up to two (LAN and WoL) addresses */
@@ -176,7 +175,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -184,18 +183,18 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -252,7 +251,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
* returns error (ENOENT), then no cage present. If no cage present, then
* connection type is backplane or BASE-T.
*/
-static enum ice_status
+static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
@@ -418,7 +417,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -429,12 +428,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
u16 cmd_flags;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
@@ -556,7 +555,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
*
* Set MAC configuration (0x0603)
*/
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
@@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd = &desc.params.set_mac_cfg;
if (max_frame_size == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
@@ -580,17 +579,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
- enum ice_status status;
+ int status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
sw = hw->switch_info;
if (!sw)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
@@ -666,17 +665,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
* ice_get_fw_log_cfg - get FW logging configuration
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
+static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
- enum ice_status status;
__le16 *config;
+ int status;
u16 size;
size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
@@ -738,15 +737,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
* messages from FW to SW. Interrupts are typically disabled during the device's
* initialization phase.
*/
-static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
struct ice_aqc_fw_logging *cmd;
- enum ice_status status = 0;
u16 i, chgs = 0, len = 0;
struct ice_aq_desc desc;
__le16 *data = NULL;
u8 actv_evnts = 0;
void *buf = NULL;
+ int status = 0;
if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
return 0;
@@ -790,7 +789,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
sizeof(*data),
GFP_KERNEL);
if (!data)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
val = i << ICE_AQC_FW_LOG_ID_S;
@@ -904,12 +903,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw(struct ice_hw *hw)
+int ice_init_hw(struct ice_hw *hw)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u16 mac_buf_len;
void *mac_buf;
+ int status;
/* Set MAC type based on DeviceID */
status = ice_set_mac_type(hw);
@@ -956,7 +955,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->port_info), GFP_KERNEL);
if (!hw->port_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_cqinit;
}
@@ -985,7 +984,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_sched;
}
@@ -1006,7 +1005,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* need a valid SW entry point to build a Tx tree */
if (!hw->sw_entry_point_layer) {
ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
@@ -1026,7 +1025,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
@@ -1096,7 +1095,7 @@ void ice_deinit_hw(struct ice_hw *hw)
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_check_reset(struct ice_hw *hw)
+int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask;
@@ -1116,7 +1115,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
@@ -1143,7 +1142,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1156,7 +1155,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
-static enum ice_status ice_pf_reset(struct ice_hw *hw)
+static int ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg;
@@ -1169,7 +1168,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
(rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
/* poll on global reset currently in progress until done */
if (ice_check_reset(hw))
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
return 0;
}
@@ -1194,7 +1193,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1212,7 +1211,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
@@ -1228,7 +1227,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
val = GLGEN_RTRIG_GLOBR_M;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -1247,16 +1246,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
*
* Copies rxq context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
if (!ice_rxq_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (rxq_index > QRX_CTRL_MAX_INDEX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
@@ -1306,14 +1305,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
if (!rlan_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
rlan_ctx->prefena = 1;
@@ -1369,9 +1368,8 @@ static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
- return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc,
- buf, buf_size, cd));
+ return ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1453,17 +1451,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
* Retry sending the FW Admin Queue command, multiple times, to the FW Admin
* Queue if the EBUSY AQ error is returned.
*/
-static enum ice_status
+static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
- enum ice_status status;
bool is_cmd_for_retry;
u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
+ int status;
opcode = le16_to_cpu(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
@@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf) {
buf_cpy = kzalloc(buf_size, GFP_KERNEL);
if (!buf_cpy)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&desc_cpy, desc, sizeof(desc_cpy));
@@ -1510,13 +1508,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd = &desc->params.res_owner;
bool lock_acquired = false;
- enum ice_status status;
+ int status;
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
@@ -1555,11 +1553,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*
* Get the firmware version (0x0001) from the admin queue commands
*/
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
struct ice_aqc_get_ver *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
resp = &desc.params.get_ver;
@@ -1590,7 +1588,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
*
* Send the driver version (0x0002) to the firmware
*/
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
@@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
cmd = &desc.params.driver_ver;
if (!dv)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
@@ -1627,7 +1625,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well (0x0003).
*/
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
struct ice_aq_desc desc;
@@ -1654,12 +1652,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can
* learn of three states:
- * 1) ICE_SUCCESS - acquired lock, and can perform download package
- * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
- * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
- * successfully downloaded the package; the driver does
- * not have to download the package and can continue
- * loading
+ * 1) 0 - acquired lock, and can perform download package
+ * 2) -EIO - did not get lock, driver should fail to load
+ * 3) -EALREADY - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
*
* Note that if the caller is in an acquire lock, perform action, release lock
* phase of operation, it is possible that the FW may detect a timeout and issue
@@ -1668,14 +1666,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* will likely get an error propagated back to it indicating the Download
* Package, Update Package or the Release Resource AQ commands timed out.
*/
-static enum ice_status
+static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd_resp = &desc.params.res_owner;
@@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_DONE) {
- return ICE_ERR_AQ_NO_WORK;
+ return -EALREADY;
}
/* invalid FW response, force a timeout immediately */
*timeout = 0;
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
}
/* If the resource is held by some other driver, the command completes
@@ -1737,7 +1735,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
*
* release common resource using the admin queue commands (0x0009)
*/
-static enum ice_status
+static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
@@ -1763,23 +1761,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
*
* This function will attempt to acquire the ownership of a resource.
*/
-enum ice_status
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
u32 time_left = timeout;
- enum ice_status status;
+ int status;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ /* A return code of -EALREADY means that another driver has
* previously acquired the resource and performed any necessary updates;
* in this case the caller does not obtain the resource and has no
* further work to do.
*/
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
goto ice_acquire_res_exit;
if (status)
@@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
/* lock free, but no work to do */
break;
@@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* lock acquired */
break;
}
- if (status && status != ICE_ERR_AQ_NO_WORK)
+ if (status && status != -EALREADY)
ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
ice_acquire_res_exit:
- if (status == ICE_ERR_AQ_NO_WORK) {
+ if (status == -EALREADY) {
if (access == ICE_RES_WRITE)
ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else
- ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
}
return status;
}
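
Per the reworked comment above, ice_aq_req_res() now reports the three Global Config Lock outcomes as 0, -EIO, and -EALREADY, and ice_acquire_res() treats -EALREADY as "no work to do". A standalone model of how a package-download caller distinguishes the three states (the acquire() stub stands in for ice_acquire_res()):

#include <errno.h>
#include <stdio.h>

/* Stand-in for ice_acquire_res() on the Global Config Lock. */
static int acquire(void)
{
	return -EALREADY;
}

int main(void)
{
	int status = acquire();

	if (status == -EALREADY)
		puts("another driver downloaded the package; continue loading");
	else if (status)
		puts("lock not acquired; fail to load");	/* -EIO */
	else
		puts("lock held; download package, then release");
	return 0;
}
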
@@ -1822,16 +1820,15 @@ ice_acquire_res_exit:
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
- enum ice_status status;
u32 total_delay = 0;
+ int status;
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
- while ((status == ICE_ERR_AQ_TIMEOUT) &&
- (total_delay < hw->adminq.sq_cmd_timeout)) {
+ while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
mdelay(1);
status = ice_aq_release_res(hw, res, 0, NULL);
total_delay++;
@@ -1849,7 +1846,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
*
* Helper function to allocate/free resources using the admin queue commands
*/
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
@@ -1860,10 +1857,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
cmd = &desc.params.sw_res_ctrl;
if (!buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (buf_size < flex_array_size(buf, elem, num_entries))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1882,17 +1879,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to allocate resource. */
buf->num_elems = cpu_to_le16(num);
@@ -1920,16 +1917,16 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
@@ -2486,19 +2483,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
* firmware could return) to avoid this.
*/
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_cap;
if (opc != ice_aqc_opc_list_func_caps &&
opc != ice_aqc_opc_list_dev_caps)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -2517,16 +2514,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2551,16 +2548,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* Read the function capabilities and extract them into the func_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2650,9 +2647,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+int ice_get_caps(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
status = ice_discover_dev_caps(hw, &hw->dev_caps);
if (status)
@@ -2670,7 +2667,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
*
* This function is used to write MAC address to the NVM (0x0108).
*/
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
@@ -2692,7 +2689,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
-static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -2903,15 +2900,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-enum ice_status
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!cfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
@@ -2952,13 +2949,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-enum ice_status ice_update_link_info(struct ice_port_info *pi)
+int ice_update_link_info(struct ice_port_info *pi)
{
struct ice_link_status *li;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
li = &pi->phy.link_info;
@@ -2974,7 +2971,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
@@ -3070,7 +3067,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
* @cfg: PHY configuration data to set FC mode
* @req_mode: FC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode req_mode)
{
@@ -3078,7 +3075,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
u8 pause_mask = 0x0;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
switch (req_mode) {
case ICE_FC_FULL:
@@ -3117,23 +3114,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
*
* Set the requested flow control mode.
*/
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !aq_failures)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
*aq_failures = 0;
hw = pi->hw;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
@@ -3258,22 +3255,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
hw = pi->hw;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false,
(ice_fw_supports_report_dflt_cfg(hw) ?
@@ -3313,7 +3310,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cfg->link_fec_opt |= pcaps->link_fec_options;
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
break;
}
@@ -3344,13 +3341,13 @@ out:
* The variable link_up is invalid if status is non zero. As a
* result of this call, link status reporting becomes enabled
*/
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
struct ice_phy_info *phy_info;
- enum ice_status status = 0;
+ int status = 0;
if (!pi || !link_up)
- return ICE_ERR_PARAM;
+ return -EINVAL;
phy_info = &pi->phy;
@@ -3375,7 +3372,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
*
* Sets up the link and restarts the Auto-Negotiation over the link.
*/
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
@@ -3405,7 +3402,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
*
* Set event mask (0x0613)
*/
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
@@ -3430,7 +3427,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
*
* Enable/disable loopback on a given port
*/
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
@@ -3453,7 +3450,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
*
* Set LED value for the given port (0x06e9)
*/
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
@@ -3488,17 +3485,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
*
* Read/Write SFF EEPROM (0x06EE)
*/
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || (mem_addr & 0xff00))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param;
@@ -3527,23 +3524,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
-static enum ice_status
+static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 *lut;
if (!params)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = params->vsi_handle;
lut = params->lut;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lut_size = params->lut_size;
lut_type = params->lut_type;
@@ -3572,7 +3569,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3607,7 +3604,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
}
fallthrough;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3626,7 +3623,7 @@ ice_aq_get_set_rss_lut_exit:
*
* get the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
return __ice_aq_get_set_rss_lut(hw, get_params, false);
@@ -3639,7 +3636,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_
*
* set the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
return __ice_aq_get_set_rss_lut(hw, set_params, true);
@@ -3654,10 +3651,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_
*
* get (0x0B04) or set (0x0B02) the RSS key per VSI
*/
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
- struct ice_aqc_get_set_rss_keys *key,
- bool set)
+static int
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key, bool set)
{
struct ice_aqc_get_set_rss_key *cmd_resp;
u16 key_size = sizeof(*key);
@@ -3688,12 +3684,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
key, false);
@@ -3707,12 +3703,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
*
* set the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
keys, true);
@@ -3739,7 +3735,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
* Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
* flow.
*/
-static enum ice_status
+static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
@@ -3754,10 +3750,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
if (!qg_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
for (i = 0, list = qg_list; i < num_qgrps; i++) {
sum_size += struct_size(list, txqs, list->num_txqs);
@@ -3766,7 +3762,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sum_size)
- return ICE_ERR_PARAM;
+ return -EINVAL;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -3787,7 +3783,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*
* Disable LAN Tx queue (0x0C31)
*/
-static enum ice_status
+static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -3796,18 +3792,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 i, sz = 0;
+ int status;
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
if (!qg_list && !rst_src)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd->num_entries = num_qgrps;
@@ -3856,7 +3852,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do_aq:
status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
@@ -3914,8 +3910,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
cmd->num_qset_grps = num_qset_grps;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
- buf_size, cd));
+ return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
/* End of FW Admin Queue command wrappers */
@@ -4111,7 +4106,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
{
@@ -4141,7 +4136,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
}
@@ -4185,7 +4180,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
*
* This function adds one LAN queue
*/
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -4193,19 +4188,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
struct ice_q_ctx *q_ctx;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (num_qgrps > 1 || buf->num_txqs > 1)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
hw = pi->hw;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4213,7 +4208,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
if (!q_ctx) {
ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
q_handle);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4221,7 +4216,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4290,20 +4285,20 @@ ena_txq_exit:
*
 * This function removes queues and their corresponding nodes in the SW DB
*/
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handles, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
+ int status = -ENOENT;
struct ice_hw *hw;
u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
hw = pi->hw;
@@ -4315,13 +4310,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (rst_src)
return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
- return ICE_ERR_CFG;
+ return -EIO;
}
buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&pi->sched_lock);
@@ -4368,18 +4363,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
*
* This function adds/updates the VSI queues per TC.
*/
-static enum ice_status
+static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
- enum ice_status status = 0;
+ int status = 0;
u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4407,7 +4402,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
*
* This function adds/updates the VSI LAN queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
@@ -4428,9 +4423,8 @@ int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
- return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
- max_rdmaqs,
- ICE_SCHED_NODE_OWNER_RDMA));
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA);
}
/**
@@ -4451,7 +4445,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
- enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
int ret;
@@ -4502,12 +4495,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
- status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
- &node);
- if (status) {
- ret = ice_status_to_errno(status);
+ ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node);
+ if (ret)
break;
- }
qset_teid[i] = le32_to_cpu(node.node_teid);
}
rdma_error_exit:
@@ -4528,8 +4519,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
- enum ice_status status = 0;
struct ice_hw *hw;
+ int status = 0;
u16 qg_size;
int i;
@@ -4568,7 +4559,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
- return ice_status_to_errno(status);
+ return status;
}
/**
@@ -4577,7 +4568,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+static int ice_replay_pre_init(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
u8 i;
@@ -4604,12 +4595,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
*/
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Replay pre-initialization if there is any */
if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
@@ -4725,12 +4716,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
*
* This function queries HW element information
*/
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
+ int status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
@@ -4775,7 +4766,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
cmd->param_indx = idx;
cmd->param_val = cpu_to_le32(value);
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4796,7 +4787,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
{
struct ice_aqc_driver_shared_params *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
return -EIO;
@@ -4810,7 +4801,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = le32_to_cpu(cmd->param_val);
@@ -4840,7 +4831,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4860,7 +4851,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
{
struct ice_aqc_gpio *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
@@ -4869,7 +4860,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = !!cmd->gpio_val;
return 0;
@@ -4903,13 +4894,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
*
* Gets the link default override for a port
*/
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi)
{
u16 i, tlv, tlv_len, tlv_start, buf, offset;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
@@ -4994,7 +4985,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
*
* Set the LLDP MIB. (0x0A08)
*/
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -5004,7 +4995,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
@@ -5044,7 +5035,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
* @vsi_num: absolute HW index for VSI
* @add: boolean for if adding or removing a filter
*/
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
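The ice_common.c changes above all follow one pattern: functions that used to return the driver-private enum ice_status now return a standard negative errno, so callers can propagate the value directly instead of translating it through ice_status_to_errno(). A minimal userspace sketch of the before/after error flow follows; the names (fake_status, old_send_cmd, new_send_cmd) are illustrative stand-ins, not real driver symbols.

/* Sketch of the error-flow change in this patch; hypothetical names. */
#include <errno.h>
#include <stdio.h>

/* Old style: a private status enum that callers had to translate. */
enum fake_status { FAKE_OK = 0, FAKE_ERR_PARAM = 1, FAKE_ERR_NO_MEMORY = 2 };

static int fake_status_to_errno(enum fake_status s)
{
	switch (s) {
	case FAKE_OK:            return 0;
	case FAKE_ERR_PARAM:     return -EINVAL;
	case FAKE_ERR_NO_MEMORY: return -ENOMEM;
	}
	return -EIO;
}

static enum fake_status old_send_cmd(int arg)
{
	return arg < 0 ? FAKE_ERR_PARAM : FAKE_OK;
}

/* New style: return a negative errno directly, as this patch does. */
static int new_send_cmd(int arg)
{
	return arg < 0 ? -EINVAL : 0;
}

int main(void)
{
	/* Before: two-step propagation through the translation helper. */
	int err = fake_status_to_errno(old_send_cmd(-1));
	printf("old path: %d\n", err);	/* typically -22 (-EINVAL) on Linux */

	/* After: the return value is already an errno; pass it straight up. */
	err = new_send_cmd(-1);
	printf("new path: %d\n", err);	/* same -EINVAL, no translation */
	return 0;
}

Collapsing everything to a single error domain is what lets hunks such as the ice_aq_add_rdma_qsets one above drop the ice_status_to_errno() wrapper entirely.
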
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 65c1b3244264..1c57097ddf0b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -14,108 +14,108 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int ice_update_link_info(struct ice_port_info *pi);
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_e810(struct ice_hw *hw);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int ice_clear_pf_cfg(struct ice_hw *hw);
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode fc);
bool
@@ -125,27 +125,27 @@ void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -159,19 +159,19 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
@@ -184,7 +184,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
@@ -199,11 +199,11 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
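The converted prototypes above make the new contract visible at a glance: every entry point returns int, 0 on success or a negative errno. The ice_get_link_status() kernel-doc earlier in this patch notes that *link_up is only valid when the return is zero; below is a compilable userspace sketch of that caller contract, using a hypothetical fake_get_link_status() rather than the real driver function.

/* Sketch of the ice_get_link_status() caller contract: *link_up is
 * only meaningful when the return value is 0. Hypothetical stand-in.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int fake_get_link_status(bool *link_up)
{
	if (!link_up)
		return -EINVAL;	/* was ICE_ERR_PARAM before this patch */
	*link_up = true;
	return 0;
}

int main(void)
{
	bool link_up = false;
	int err = fake_get_link_status(&link_up);

	if (err) {
		/* link_up must not be trusted on error */
		fprintf(stderr, "link query error, status = %d\n", err);
		return 1;
	}
	printf("link is %s\n", link_up ? "up" : "down");
	return 0;
}
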
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 03bdb125be36..6bcfee295991 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->sq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->sq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.desc_buf.size = size;
cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
@@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.desc_buf.va = NULL;
cq->sq.desc_buf.pa = 0;
cq->sq.desc_buf.size = 0;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
return 0;
@@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->rq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->rq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.desc_buf.size = size;
return 0;
}
@@ -154,7 +154,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
sizeof(cq->rq.desc_buf), GFP_KERNEL);
if (!cq->rq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
/* allocate the mapped buffers */
@@ -218,7 +218,7 @@ unwind_alloc_rq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq->rq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -226,7 +226,7 @@ unwind_alloc_rq_bufs:
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sizeof(cq->sq.desc_buf), GFP_KERNEL);
if (!cq->sq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
/* allocate the mapped buffers */
@@ -266,10 +266,10 @@ unwind_alloc_sq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq->sq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
-static enum ice_status
+static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
/* Clear Head and Tail */
@@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
/* Check one register to verify that config was applied */
if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
return 0;
}
@@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
*
* Configure base address and length registers for the transmit queue
*/
-static enum ice_status
-ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
@@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*
 * Configure base address and length registers for the receive (event) queue
*/
-static enum ice_status
-ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status status;
+ int status;
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
if (status)
@@ -361,19 +359,19 @@ do { \
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->sq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_sq_entries || !cq->sq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -421,19 +419,19 @@ init_ctrlq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->rq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->rq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -474,15 +472,14 @@ init_ctrlq_exit:
*
* The main shutdown routine for the Control Transmit Queue
*/
-static enum ice_status
-ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->sq_lock);
if (!cq->sq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_sq_out;
}
@@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
*
* The main shutdown routine for the Control Receive Queue
*/
-static enum ice_status
-ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->rq_lock);
if (!cq->rq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_rq_out;
}
@@ -576,17 +572,17 @@ shutdown_rq_out:
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
* @hw: pointer to the hardware structure
*/
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- enum ice_status status;
+ int status;
status = ice_aq_get_fw_ver(hw, NULL);
if (status)
goto init_ctrlq_free_rq;
if (!ice_aq_ver_check(hw)) {
- status = ICE_ERR_FW_API_VER;
+ status = -EIO;
goto init_ctrlq_free_rq;
}
@@ -612,10 +608,10 @@ init_ctrlq_free_rq:
*
* NOTE: this function does not initialize the controlq locks
*/
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
- enum ice_status ret_code;
+ int ret_code;
switch (q_type) {
case ICE_CTL_Q_ADMIN:
@@ -631,14 +627,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
cq = &hw->mailboxq;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
cq->qtype = q_type;
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->num_sq_entries ||
!cq->rq_buf_size || !cq->sq_buf_size) {
- return ICE_ERR_CFG;
+ return -EIO;
}
/* setup SQ command write back timeout */
@@ -751,10 +747,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
*
* NOTE: this function does not initialize the controlq locks.
*/
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status status;
u32 retry = 0;
+ int status;
/* Init FW admin queue */
do {
@@ -763,7 +759,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
return status;
status = ice_init_check_adminq(hw);
- if (status != ICE_ERR_AQ_FW_CRITICAL)
+ if (status != -EIO)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
@@ -814,7 +810,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
@@ -962,7 +958,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -970,27 +966,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- enum ice_status status = 0;
struct ice_sq_cd *details;
u32 total_delay = 0;
+ int status = 0;
u16 retval = 0;
u32 val = 0;
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
- return ICE_ERR_RESET_ONGOING;
+ return -EBUSY;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
if ((buf && !buf_size) || (!buf && buf_size)) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -998,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -1011,7 +1007,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
@@ -1028,7 +1024,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*/
if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
- status = ICE_ERR_AQ_FULL;
+ status = -ENOSPC;
goto sq_send_command_error;
}
@@ -1082,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
} else {
memcpy(buf, dma_buf->va, copy_size);
}
@@ -1098,7 +1094,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
cmd_completed = true;
if (!status && retval != ICE_AQ_RC_OK)
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
cq->sq_last_status = (enum ice_aq_err)retval;
}
@@ -1116,10 +1112,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
- status = ICE_ERR_AQ_FW_CRITICAL;
+ status = -EIO;
} else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ status = -EIO;
}
}
@@ -1154,15 +1150,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
 * the contents through 'e'. It can also return how many events are
 * left to process through 'pending'.
*/
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
- enum ice_status ret_code = 0;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
@@ -1176,7 +1172,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
- ret_code = ICE_ERR_AQ_EMPTY;
+ ret_code = -EIO;
goto clean_rq_elem_err;
}
@@ -1185,7 +1181,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = ICE_ERR_AQ_NO_WORK;
+ ret_code = -EALREADY;
goto clean_rq_elem_out;
}
@@ -1196,7 +1192,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
- ret_code = ICE_ERR_AQ_ERROR;
+ ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
}
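One subtlety in the control-queue conversion above is that not every non-zero return is a failure: ICE_ERR_AQ_NO_WORK becomes -EALREADY, meaning the receive queue is simply empty. The sketch below shows how a polling caller might distinguish that case; fake_clean_rq_elem() is a hypothetical stand-in for ice_clean_rq_elem().

/* Sketch: -EALREADY (formerly ICE_ERR_AQ_NO_WORK) means "queue empty",
 * not a hard failure. Hypothetical stand-in function.
 */
#include <errno.h>
#include <stdio.h>

static int pending_events = 2;

static int fake_clean_rq_elem(void)
{
	if (pending_events == 0)
		return -EALREADY;	/* nothing left to do */
	pending_events--;
	return 0;
}

int main(void)
{
	for (;;) {
		int err = fake_clean_rq_elem();

		if (err == -EALREADY)
			break;		/* drained: normal exit, not an error */
		if (err) {
			fprintf(stderr, "receive queue error: %d\n", err);
			return 1;
		}
		printf("processed one event\n");
	}
	return 0;
}
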
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 241427cd9bc0..0b146a0d4205 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -19,19 +18,19 @@
*
* Requests the complete LLDP MIB (entire packet). (0x0A00)
*/
-static enum ice_status
+static int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
@@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes (0x0A01)
*/
-static enum ice_status
+static int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
@@ -89,7 +88,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
@@ -117,8 +116,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
struct ice_aq_desc desc;
@@ -598,18 +596,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-static enum ice_status
-ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
- enum ice_status ret = 0;
u16 offset = 0;
+ int ret = 0;
u16 typelen;
u16 type;
u16 len;
if (!lldpmib || !dcbcfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
@@ -649,17 +646,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
*
* Query DCB configuration from the firmware
*/
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
- enum ice_status ret;
u8 *lldpmib;
+ int ret;
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
@@ -684,17 +681,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, the caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
u16 opcode;
+ int status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -724,7 +721,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*
* Get CEE DCBX mode operational configuration from firmware (0x0A07)
*/
-static enum ice_status
+static int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
@@ -749,7 +746,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
@@ -762,7 +759,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
@@ -903,14 +900,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*
* Get IEEE or CEE mode DCB configuration from the Firmware
*/
-static enum ice_status
-ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
{
struct ice_dcbx_cfg *dcbx_cfg = NULL;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -943,14 +939,14 @@ out:
*
* Get DCB configuration from the Firmware
*/
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
{
struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
@@ -974,13 +970,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
*
* Update DCB configuration from the Firmware
*/
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret = 0;
+ int ret = 0;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
qos_cfg->is_sw_lldp = true;
@@ -996,7 +992,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
return ret;
qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
}
/* Configure the LLDP MIB change event */
@@ -1016,19 +1012,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*
* Configure (disable/enable) MIB
*/
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret;
+ int ret;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Get DCBX status */
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
@@ -1469,16 +1465,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
*
* Set DCB configuration to the Firmware
*/
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
{
u8 mib_type, *lldpmib = NULL;
struct ice_dcbx_cfg *dcbcfg;
- enum ice_status ret;
struct ice_hw *hw;
u16 miblen;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -1487,7 +1483,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
@@ -1511,17 +1507,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
*
* query current port ETS configuration
*/
-static enum ice_status
+static int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
@@ -1537,18 +1533,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
*
* update the SW DB with the new TC changes
*/
-static enum ice_status
+static int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_txsched_elem_data elem;
- enum ice_status status = 0;
u32 teid1, teid2;
+ int status = 0;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* suspend the missing TC nodes */
for (i = 0; i < pi->root->num_children; i++) {
teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
@@ -1605,12 +1601,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
* query current port ETS configuration and update the
* SW DB with the TC changes
*/
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
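The comment rewritten above for ice_aq_start_stop_dcbx() preserves an important caller obligation: a zero return only means the admin queue command succeeded, and the caller must still compare dcbx_agent_status against the state it requested. A small sketch of that pattern, with hypothetical names rather than real driver symbols:

/* Sketch: a 0 return only means the command was accepted; the caller
 * still checks whether the agent reached the requested state.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int fake_start_stop_dcbx(bool start, bool *agent_status)
{
	if (!agent_status)
		return -EINVAL;
	*agent_status = start;	/* pretend firmware honored the request */
	return 0;
}

int main(void)
{
	bool status = false;
	int err = fake_start_stop_dcbx(true, &status);

	if (err)
		return 1;
	if (!status) {
		/* command succeeded but the agent did not reach the
		 * requested state: treat as not ready */
		fprintf(stderr, "DCBX agent not started\n");
		return 1;
	}
	printf("DCBX agent running\n");
	return 0;
}
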
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index 9b6f87a889a6..d73348f279f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -138,28 +138,27 @@ struct ice_cee_app_prio {
} __packed;
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
-enum ice_status
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
#else /* CONFIG_DCB */
-static inline enum ice_status
+static inline int
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
bool __always_unused persist,
@@ -168,7 +167,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
@@ -176,7 +175,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
bool __always_unused start_dcbx_agent,
bool *dcbx_agent_status,
@@ -187,7 +186,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
bool __always_unused ena_mib)
{
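The !CONFIG_DCB stubs above keep callers free of #ifdefs by returning success when DCB support is compiled out. A generic sketch of that compile-time stub idiom follows, using a hypothetical FEATURE_DCB switch rather than the kernel's Kconfig machinery.

/* Sketch of the compiled-out stub pattern: callers are identical
 * whether the feature is built in or not. FEATURE_DCB is hypothetical.
 */
#include <stdio.h>

#define FEATURE_DCB 0

#if FEATURE_DCB
static int do_real_work(void) { return 0; }
static int feature_start(void) { return do_real_work(); }
#else
static inline int feature_start(void) { return 0; }	/* no-op stub */
#endif

int main(void)
{
	if (feature_start())
		return 1;
	printf("started (or stubbed out)\n");
	return 0;
}
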
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a72e18320a22..b94d8daeaa58 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -528,7 +528,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
struct ice_dcbx_cfg *err_cfg;
- enum ice_status ret;
+ int ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 7fdeb411b6df..3eb01731e496 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
+ if (!bwcfg)
+ new_cfg->etscfg.tcbwtable[0] = 100;
+
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
if (mode == pf->dcbx_cap)
return ICE_DCB_NO_HW_CHG;
- pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE) {
- if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
- return ICE_DCB_NO_HW_CHG;
+
+ /* DSCP configuration is not DCBx negotiated */
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
+
+ pf->dcbx_cap = mode;
+
+ if (mode & DCB_CAP_DCBX_VER_CEE)
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- } else {
+ else
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
- }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
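Beyond the status conversion, the ice_dcbnl_setdcbx() hunk above reorders the logic so the DSCP rejection is checked before pf->dcbx_cap is written; previously a refused CEE request could leave the stored capability already updated. A sketch of the resulting check-before-commit pattern, with hypothetical stand-ins for the driver state:

/* Sketch: validate the rejection condition before writing the new
 * mode, so a refused request leaves stored state untouched. The
 * NO_HW_CHG/HW_CHG_RST macros mirror the driver's return codes.
 */
#include <stdio.h>

#define NO_HW_CHG	1	/* stand-in for ICE_DCB_NO_HW_CHG */
#define HW_CHG_RST	2	/* stand-in for ICE_DCB_HW_CHG_RST */

enum pfc_mode { MODE_VLAN, MODE_DSCP };

struct fake_pf {
	unsigned int dcbx_cap;
	enum pfc_mode pfc_mode;
};

static int fake_setdcbx(struct fake_pf *pf, unsigned int mode)
{
	if (mode == pf->dcbx_cap)
		return NO_HW_CHG;

	/* DSCP configuration is not DCBx negotiated: reject before
	 * touching pf->dcbx_cap, unlike the pre-patch ordering */
	if (pf->pfc_mode == MODE_DSCP)
		return NO_HW_CHG;

	pf->dcbx_cap = mode;	/* commit only after validation */
	return HW_CHG_RST;
}

int main(void)
{
	struct fake_pf pf = { .dcbx_cap = 0x1, .pfc_mode = MODE_DSCP };

	if (fake_setdcbx(&pf, 0x2) == NO_HW_CHG)
		printf("rejected; dcbx_cap unchanged: 0x%x\n", pf.dcbx_cap);
	return 0;
}
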
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 478412b28a76..737ee23f5a87 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -39,13 +39,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
if (status)
/* We failed to locate the PBA, so just skip this entry */
- dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
- ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
+ status);
}
static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -251,7 +251,6 @@ static int ice_devlink_info_get(struct devlink *devlink,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_info_ctx *ctx;
- enum ice_status status;
size_t i;
int err;
@@ -266,20 +265,19 @@ static int ice_devlink_info_get(struct devlink *devlink,
return -ENOMEM;
/* discover capabilities first */
- status = ice_discover_dev_caps(hw, &ctx->dev_caps);
- if (status) {
- dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
- err = -EIO;
goto out_free_ctx;
}
if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
- status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -287,10 +285,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
- status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
- if (status) {
- dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -298,10 +296,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
- status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -436,7 +434,7 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
{
struct ice_pf *pf = devlink_priv(devlink);
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2;
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
return 0;
}
@@ -762,9 +760,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *nvm_data;
u32 nvm_size;
+ int status;
nvm_size = hw->flash.flash_size;
nvm_data = vzalloc(nvm_size);
@@ -777,7 +775,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
vfree(nvm_data);
- return -EIO;
+ return status;
}
status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
@@ -787,7 +785,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
ice_release_nvm(hw);
vfree(nvm_data);
- return -EIO;
+ return status;
}
ice_release_nvm(hw);
@@ -819,8 +817,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *devcaps;
+ int status;
devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
if (!devcaps)
@@ -833,7 +831,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
vfree(devcaps);
- return -EIO;
+ return status;
}
*data = (u8 *)devcaps;
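The devlink snapshot hunks above stop flattening every failure to -EIO and instead propagate whatever errno the NVM helpers produced, so userspace can tell, say, a busy semaphore from a read failure. A sketch of the propagation change, with fake_acquire_nvm() standing in for the real helper:

/* Sketch: return the underlying errno instead of a blanket -EIO.
 * Hypothetical helpers, not real driver functions.
 */
#include <errno.h>
#include <stdio.h>

static int fake_acquire_nvm(void)
{
	return -EBUSY;		/* pretend the semaphore is held */
}

static int fake_nvm_snapshot(void)
{
	int status = fake_acquire_nvm();

	if (status)
		return status;	/* was "return -EIO;" before this patch */
	return 0;
}

int main(void)
{
	int err = fake_nvm_snapshot();

	if (err)
		fprintf(stderr, "snapshot failed: %d\n", err);	/* typically -16 (-EBUSY) */
	return err ? 1 : 0;
}
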
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index faea757fcf5d..fe006d9946f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -4,10 +4,6 @@
#ifndef _ICE_DEVLINK_H_
#define _ICE_DEVLINK_H_
-enum ice_devlink_param_id {
- ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
-};
-
struct ice_pf *ice_allocate_pf(struct device *dev);
void ice_devlink_register(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 5af2faaa21e1..aefd9c3d450b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -270,9 +270,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
- int ret = 0;
+ int ret;
u8 *buf;
dev = ice_pf_to_dev(pf);
@@ -285,22 +284,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!buf)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret) {
+ dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
- status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
- false);
- if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
+ false);
+ if (ret) {
+ dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -342,14 +337,14 @@ static bool ice_active_vfs(struct ice_pf *pf)
static u64 ice_link_test(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- enum ice_status status;
bool link_up = false;
+ int status;
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "link query error, status = %d\n",
+ status);
return 1;
}
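Note that ice_link_test() still returns 1 rather than the errno: ethtool self-test callbacks report results through a u64-per-test array in which any nonzero entry marks that test as failed, so the errno only matters for the log. A condensed consumer sketch (the data array and ICE_ETH_TEST_LINK index are assumptions modeled on the usual ethtool self-test layout):

	data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
	if (data[ICE_ETH_TEST_LINK])
		eth_test->flags |= ETH_TEST_FL_FAILED;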
@@ -1052,8 +1047,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
- enum ice_status status;
- int err = 0;
+ int err;
pi = vsi->port_info;
@@ -1079,12 +1073,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ caps, NULL);
+ if (err)
goto done;
- }
/* Set supported/configured FEC modes based on PHY capability */
if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
@@ -1203,7 +1195,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
- enum ice_status status;
+ int status;
/* Disable FW LLDP engine */
status = ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -1232,8 +1224,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
} else {
- enum ice_status status;
bool dcbx_agent_status;
+ int status;
if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
@@ -1938,8 +1930,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
- int err = 0;
+ int err;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -1990,12 +1981,10 @@ ice_get_link_ksettings(struct net_device *netdev,
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
+ if (err)
goto done;
- }
/* Set the advertised flow control based on the PHY capability */
if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
@@ -2027,12 +2016,10 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
+ if (err)
goto done;
- }
/* Set supported FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
@@ -2210,11 +2197,10 @@ ice_set_link_ksettings(struct net_device *netdev,
struct ice_pf *pf = np->vsi->back;
struct ice_port_info *pi;
u8 autoneg_changed = 0;
- enum ice_status status;
u64 phy_type_high = 0;
u64 phy_type_low = 0;
- int err = 0;
bool linkup;
+ int err;
pi = np->vsi->port_info;
@@ -2234,15 +2220,13 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Get the PHY capabilities based on media */
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- phy_caps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ phy_caps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- phy_caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ phy_caps, NULL);
+ if (err)
goto done;
- }
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
@@ -2308,11 +2292,9 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Call to get the current link speed */
pi->phy.get_link_info = true;
- status = ice_get_link_status(pi, &linkup);
- if (status) {
- err = -EIO;
+ err = ice_get_link_status(pi, &linkup);
+ if (err)
goto done;
- }
curr_link_speed = pi->phy.link_info.link_speed;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
@@ -2381,10 +2363,9 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
- if (status) {
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
+ if (err) {
netdev_info(netdev, "Set phy config failed,\n");
- err = -EIO;
goto done;
}
@@ -2522,9 +2503,9 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
u64 hashed_flds;
+ int status;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2550,9 +2531,9 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EINVAL;
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return status;
}
return 0;
@@ -2953,7 +2934,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status status;
+ int status;
/* Initialize pause params */
pause->rx_pause = 0;
@@ -3003,11 +2984,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
- enum ice_status status;
u8 aq_failures;
bool link_up;
- int err = 0;
u32 is_an;
+ int err;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -3033,11 +3013,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
kfree(pcaps);
- return -EIO;
+ return err;
}
is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
@@ -3073,22 +3053,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EINVAL;
/* Set the FC mode and only restart AN if link is up */
- status = ice_set_fc(pi, &aq_failures, link_up);
+ err = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
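ice_set_fc() can fail in any of three firmware steps and reports which through the aq_failures bitmask, which is why the caller logs per flag and overrides err with -EAGAIN instead of forwarding the raw value: userspace is expected to retry. The same check, condensed (flag names from the hunk above):

	err = ice_set_fc(pi, &aq_failures, link_up);
	if (aq_failures & (ICE_SET_FC_AQ_FAIL_GET | ICE_SET_FC_AQ_FAIL_SET |
			   ICE_SET_FC_AQ_FAIL_UPDATE))
		err = -EAGAIN;	/* details were already logged per flag */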
@@ -3928,16 +3905,16 @@ ice_get_module_info(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
u8 sff8636_rev = 0;
u8 value = 0;
+ int status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
0, &value, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
switch (value) {
case ICE_MODULE_TYPE_SFP:
@@ -3945,12 +3922,12 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_SFF_8472_COMP, 0x00, 0,
&sff8472_comp, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
&sff8472_swap, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -3970,7 +3947,7 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_REVISION_ADDR, 0x00, 0,
&sff8636_rev, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
/* Check revision compliance */
if (sff8636_rev > 0x02) {
/* Module is SFF-8636 compliant */
@@ -4005,11 +3982,11 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
bool is_sfp = false;
unsigned int i, j;
u16 offset = 0;
u8 page = 0;
+ int status;
if (!ee || !ee->len || !data)
return -EINVAL;
@@ -4017,7 +3994,7 @@ ice_get_module_eeprom(struct net_device *netdev,
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
NULL);
if (status)
- return -EIO;
+ return status;
if (value[0] == ICE_MODULE_TYPE_SFP)
is_sfp = true;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 38960bcc384c..bbc64d6ce4cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -530,7 +530,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *hw_prof;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u64 entry1_h = 0;
u64 entry2_h = 0;
u64 prof_id;
@@ -581,24 +580,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* actions (NULL) and an action count of zero.
*/
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
- if (status)
- return ice_status_to_errno(status);
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (err)
+ return err;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (err)
goto err_prof;
- }
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (err)
goto err_entry;
- }
hw_prof->fdir_seg[tun] = seg;
hw_prof->entry_h[0][tun] = entry1_h;
@@ -1190,7 +1185,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
struct ice_hw *hw = &pf->hw;
struct ice_fltr_desc desc;
struct ice_vsi *ctrl_vsi;
- enum ice_status status;
u8 *pkt, *frag_pkt;
bool has_frag;
int err;
@@ -1209,11 +1203,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
}
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (err)
goto err_free_all;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
if (err)
goto err_free_all;
@@ -1223,12 +1215,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
if (has_frag) {
/* does not return error */
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
- is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (err)
goto err_frag;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
if (err)
goto err_frag;
@@ -1268,7 +1258,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1652,7 +1642,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index cbd8424631e3..ae089d32ee9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
* @hw: pointer to the hardware structure
* @cntr_id: returns counter index
*/
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
* @hw: pointer to the hardware structure
* @cntr_id: counter index to be freed
*/
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
{
return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
* @frag: generate a fragment packet
* @tun: true implies generate a tunnel packet
*/
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun)
{
@@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (ice_fdir_pkt[idx].flow == flow)
break;
if (idx == ICE_FDIR_NUM_PKT)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!tun) {
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, &tnl_port))
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
+ return -ENOENT;
if (!ice_fdir_pkt[idx].tun_pkt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
ice_fdir_pkt[idx].tun_pkt_len);
ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
@@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
if (input->flex_fltr)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index da4163856f4c..b6c7c6903f35 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -201,16 +201,14 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
void
ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
struct ice_fltr_desc *fdesc, bool add);
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 23cfcceb1536..d29197ab3d02 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -314,6 +314,78 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
}
/**
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
+ * @hw: pointer to the HW structure
+ * @ptype: the hardware PTYPE
+ */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
+{
+ return ptype < ICE_FLOW_PTYPE_MAX &&
+ test_bit(ptype, hw->hw_ptype);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *
+ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = section;
+ if (index >= le16_to_cpu(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ memset(&state, 0, sizeof(state));
+
+ do {
+ tcam = ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
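ice_fill_hw_ptype() follows the established ice_pkg_enum_entry() convention: pass the segment on the first call only, NULL afterwards, and stop when the handler returns NULL. The generic shape of such a walk (sect_type, handler and consume() are placeholders, not driver symbols):

	struct ice_pkg_enum state;
	struct ice_seg *seg = hw->seg;
	void *entry;

	memset(&state, 0, sizeof(state));
	do {
		entry = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
					   handler);
		if (entry)
			consume(entry);	/* per-entry processing */
		seg = NULL;	/* only the first call takes the segment */
	} while (entry);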
+
+/**
* ice_boost_tcam_handler
* @sect_type: section type
* @section: pointer to section
@@ -358,7 +430,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* if it is found. The ice_seg parameter must not be NULL since the first call
* to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
*/
-static enum ice_status
+static int
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
struct ice_boost_tcam_entry **entry)
{
@@ -368,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
memset(&state, 0, sizeof(state));
if (!ice_seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do {
tcam = ice_pkg_enum_entry(ice_seg, &state,
@@ -383,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
} while (tcam);
*entry = NULL;
- return ICE_ERR_CFG;
+ return -EIO;
}
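The conversions in this file follow one consistent mapping from the old driver-private codes to kernel errnos, collected here from the hunks above and below:

	/*
	 * ICE_ERR_PARAM          -> -EINVAL
	 * ICE_ERR_CFG            -> -EIO
	 * ICE_ERR_OUT_OF_RANGE   -> -EIO
	 * ICE_ERR_AQ_ERROR       -> -EIO
	 * ICE_ERR_DOES_NOT_EXIST -> -ENOENT
	 * ICE_ERR_NO_MEMORY      -> -ENOMEM
	 * ICE_ERR_MAX_LIMIT      -> -ENOSPC
	 * ICE_ERR_AQ_NO_WORK     -> -EALREADY
	 */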
/**
@@ -549,7 +621,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
* ------------------------------
* Result: key: b01 10 11 11 00 00
*/
-static enum ice_status
+static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
u8 *key_inv)
{
@@ -558,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
- return ICE_ERR_CFG;
+ return -EIO;
*key = 0;
*key_inv = 0;
@@ -651,7 +723,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-static enum ice_status
+static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -660,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
/* size must be a multiple of 2 bytes. */
if (size % 2)
- return ICE_ERR_CFG;
+ return -EIO;
half_size = size / 2;
if (off + len > half_size)
- return ICE_ERR_CFG;
+ return -EIO;
/* Make sure at most one bit is set in the never match mask. Having more
* than one never match mask bit set will cause HW to consume excessive
@@ -672,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
*/
#define ICE_NVR_MTCH_BITS_MAX 1
if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
- return ICE_ERR_CFG;
+ return -EIO;
for (i = 0; i < len; i++)
if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
dc ? dc[i] : 0, nm ? nm[i] : 0,
key + off + i, key + half_size + off + i))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
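ice_set_key() writes a TCAM key as two equal halves, key and key-inverse, generating one byte pair per input byte via ice_gen_key_word(); size covers both halves while off/len address a single half. A small worked sketch under those rules (buffer contents invented for illustration):

	u8 key[4] = {};			/* 2-byte key + 2-byte inverse */
	u8 val[2] = { 0x12, 0x34 };	/* value to match */
	u8 dc[2] = { 0x0f, 0x00 };	/* low nibble of byte 0: don't care */

	/* upd == NULL updates all bits; nm == NULL sets no never-match bits */
	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, 2))
		return -EIO;	/* sizes or masks were inconsistent */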
@@ -692,25 +764,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
* or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values:
*
- * ICE_SUCCESS - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static enum ice_status
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
- enum ice_status status;
+ int status;
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (!status)
mutex_lock(&ice_global_cfg_lock_sw);
- else if (status == ICE_ERR_AQ_NO_WORK)
+ else if (status == -EALREADY)
ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status;
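The rewritten comment spells out the two outcomes a package writer must tell apart; the new ice_dwnld_cfg_bufs() below consumes them exactly this way (condensed from the hunks that follow):

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == -EALREADY)
		return ICE_DDP_PKG_ALREADY_LOADED; /* another PF did the work */
	if (status)
		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
	/* ... download buffers, then ice_release_global_cfg_lock(hw) ... */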
@@ -735,7 +807,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -765,14 +837,14 @@ void ice_release_change_lock(struct ice_hw *hw)
*
* Download Package (0x0C40)
*/
-static enum ice_status
+static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, bool last_buf, u32 *error_offset,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -787,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
@@ -813,14 +885,14 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Update Package (0x0C42)
*/
-static enum ice_status
+static int
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
bool last_buf, u32 *error_offset, u32 *error_info,
struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -835,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
@@ -892,11 +964,10 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
*
* Obtains change lock and updates package.
*/
-static enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
u32 offset, info, i;
+ int status;
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
@@ -921,6 +992,22 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
return status;
}
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_LOAD_ERROR;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
@@ -931,15 +1018,17 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* to the firmware. Metadata buffers are skipped, and the first metadata buffer
* found indicates that the rest of the buffers are all metadata buffers.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
u32 offset, info, i;
+ int status;
if (!bufs || !count)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
/* If the first buffer's first section has its metadata bit set
* then there are no buffers to be downloaded, and the operation is
@@ -947,20 +1036,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*/
bh = (struct ice_buf_hdr *)bufs;
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return 0;
-
- /* reset pkg_dwnld_status in case this function is called in the
- * reset/rebuild flow
- */
- hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+ return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status) {
- if (status == ICE_ERR_AQ_NO_WORK)
- hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
- else
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- return status;
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
for (i = 0; i < count; i++) {
@@ -986,11 +1068,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
&offset, &info, NULL);
/* Save AQ status from download package */
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
-
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
break;
}
@@ -1000,7 +1082,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
ice_release_global_cfg_lock(hw);
- return status;
+ return state;
}
/**
@@ -1012,7 +1094,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*
* Get Package Info List (0x0C43)
*/
-static enum ice_status
+static int
ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
@@ -1031,7 +1113,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
*
* Handles the download of a complete package.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
@@ -1062,13 +1144,13 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
*
* Saves off the package details into the HW structure.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
struct ice_generic_seg_hdr *seg_hdr;
if (!pkg_hdr)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
@@ -1082,7 +1164,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
ICE_SID_METADATA);
if (!meta) {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
hw->pkg_ver = meta->ver;
@@ -1104,10 +1186,10 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr->seg_id);
} else {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1116,21 +1198,22 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
*
* Store details of the package currently loaded in HW into the HW structure.
*/
-static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_aqc_get_pkg_info_resp *pkg_info;
- enum ice_status status;
u16 size;
u32 i;
size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
- return ICE_ERR_NO_MEMORY;
+ return ICE_DDP_PKG_ERR;
- status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
- if (status)
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
goto init_pkg_free_alloc;
+ }
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
@@ -1165,7 +1248,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
init_pkg_free_alloc:
kfree(pkg_info);
- return status;
+ return state;
}
/**
@@ -1176,28 +1259,28 @@ init_pkg_free_alloc:
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
if (len < struct_size(pkg, seg_offset, 1))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
/* pkg must have at least one segment */
seg_count = le32_to_cpu(pkg->seg_count);
if (seg_count < 1)
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
/* make sure segment array fits in package length */
if (len < struct_size(pkg, seg_offset, seg_count))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
/* all segments must fit within length */
for (i = 0; i < seg_count; i++) {
@@ -1206,16 +1289,16 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
/* segment header must fit */
if (len < off + sizeof(*seg))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
/* segment body must fit */
if (len < off + le32_to_cpu(seg->seg_size))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
}
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1259,13 +1342,18 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
* version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
* definitions.
*/
-static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
- if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
- return ICE_ERR_NOT_SUPPORTED;
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
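The relaxed check still succeeds only on an exact major.minor match, but a mismatch now reports its direction so callers can tell a stale package from one that is too new. A worked example, assuming a supported version of 1.3:

	struct ice_pkg_ver v = { .major = 1, .minor = 4 };

	/* 1.4 > 1.3 on the minor component */
	WARN_ON(ice_chk_pkg_version(&v) != ICE_DDP_PKG_FILE_VERSION_TOO_HIGH);

	v.minor = 2;	/* 1.2 < 1.3 */
	WARN_ON(ice_chk_pkg_version(&v) != ICE_DDP_PKG_FILE_VERSION_TOO_LOW);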
/**
@@ -1276,20 +1364,20 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This function checks the package version compatibility with driver and NVM
*/
-static enum ice_status
+static enum ice_ddp_state
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_status status;
+ enum ice_ddp_state state;
u16 size;
u32 i;
/* Check package version compatibility */
- status = ice_chk_pkg_version(&hw->pkg_ver);
- if (status) {
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return status;
+ return state;
}
/* find ICE segment in given package */
@@ -1297,18 +1385,19 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
ospkg);
if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
/* Check if FW is compatible with the OS package */
size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
- return ICE_ERR_NO_MEMORY;
+ return ICE_DDP_PKG_ERR;
- status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
- if (status)
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_LOAD_ERROR;
goto fw_ddp_compat_free_alloc;
+ }
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
@@ -1318,7 +1407,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
pkg->pkg_info[i].ver.major ||
(*seg)->hdr.seg_format_ver.minor >
pkg->pkg_info[i].ver.minor) {
- status = ICE_ERR_FW_DDP_MISMATCH;
+ state = ICE_DDP_PKG_FW_MISMATCH;
ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
}
/* done processing NVM package so break */
@@ -1326,7 +1415,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
}
fw_ddp_compat_free_alloc:
kfree(pkg);
- return status;
+ return state;
}
/**
@@ -1367,7 +1456,7 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* and store the index number in struct ice_switch_info *switch_info
* in HW for following use.
*/
-static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+static int ice_get_prof_index_max(struct ice_hw *hw)
{
u16 prof_index = 0, j, max_prof_index = 0;
struct ice_pkg_enum state;
@@ -1379,7 +1468,7 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
memset(&state, 0, sizeof(state));
if (!hw->seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_seg = hw->seg;
@@ -1410,6 +1499,34 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
}
/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state
+ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
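ice_get_ddp_pkg_state() resolves the post-download outcome in a fixed order, summarized here from the branches above:

	/*
	 * active pkg identical (version and name) to the file:
	 *   -> ICE_DDP_PKG_SUCCESS (this driver just loaded it), or
	 *   -> ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED (it was already there)
	 * active pkg differs, its major.minor unsupported:
	 *   -> ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED
	 * active pkg differs, its major.minor supported:
	 *   -> ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED
	 */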
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1434,53 +1551,54 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
* ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
* case.
*/
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
struct ice_pkg_hdr *pkg;
- enum ice_status status;
struct ice_seg *seg;
if (!buf || !len)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
- if (status) {
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
- return status;
+ state);
+ return state;
}
/* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_compat(hw, pkg, &seg);
- if (status)
- return status;
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
+ state = ice_download_pkg(hw, seg);
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- status = 0;
+ already_loaded = true;
}
/* Get information on the package currently loaded in HW, then make sure
* the driver is compatible with this version.
*/
- if (!status) {
- status = ice_get_pkg_info(hw);
- if (!status)
- status = ice_chk_pkg_version(&hw->active_pkg_ver);
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
}
- if (!status) {
+ if (ice_is_init_pkg_successful(state)) {
hw->seg = seg;
/* on successful package download update other required
* registers to support the package and fill HW tables
@@ -1488,13 +1606,14 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- status);
+ state);
}
- return status;
+ return state;
}
/**
@@ -1520,18 +1639,19 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
* package buffer, as the new copy will be managed by this function and
* related routines.
*/
-enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
- enum ice_status status;
+ enum ice_ddp_state state;
u8 *buf_copy;
if (!buf || !len)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
- status = ice_init_pkg(hw, buf_copy, len);
- if (status) {
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
/* Free the copy, since we failed to initialize the package */
devm_kfree(ice_hw_to_dev(hw), buf_copy);
} else {
@@ -1540,7 +1660,23 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
hw->pkg_size = len;
}
- return status;
+ return state;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
}
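Because enum ice_ddp_state is no longer zero-on-success-only, callers test it through this helper rather than with if (!state). A hypothetical consumer sketch (the log wording is illustrative, not taken from the driver):

	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
	if (ice_is_init_pkg_successful(state))
		dev_info(dev, "DDP package is active (state %d)\n", state);
	else
		dev_err(dev, "DDP package failed to load (state %d)\n", state);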
/**
@@ -1644,7 +1780,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list)
{
@@ -1658,7 +1794,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state));
if (!ids_cnt || !hw->seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_seg = hw->seg;
do {
@@ -1702,7 +1838,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
}
} while (fv);
if (list_empty(fv_list))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
err:
@@ -1711,7 +1847,7 @@ err:
devm_kfree(ice_hw_to_dev(hw), fvl);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -1779,7 +1915,7 @@ static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-static enum ice_status
+static int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1787,17 +1923,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
u16 data_end;
if (!bld)
- return ICE_ERR_PARAM;
+ return -EINVAL;
buf = (struct ice_buf_hdr *)&bld->buf;
/* already an active section, can't increase table size */
section_count = le16_to_cpu(buf->section_count);
if (section_count > 0)
- return ICE_ERR_CFG;
+ return -EIO;
if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return ICE_ERR_CFG;
+ return -EIO;
bld->reserved_section_table_entries += count;
data_end = le16_to_cpu(buf->data_end) +
@@ -1899,9 +2035,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
* ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
* @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
*/
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type)
{
bool res = false;
u16 i;
@@ -1909,7 +2047,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
mutex_lock(&hw->tnl_lock);
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+ (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
*port = hw->tnl.tbl[i].port;
res = true;
break;
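Callers can now scope the lookup to one encapsulation or pass a wildcard; the fdir hunks above use TNL_ALL, which this TNL_LAST comparison still matches, assuming the two enumerators share a value as those call sites imply. A short usage sketch:

	u16 port;

	/* any open UDP tunnel at all (the wildcard the fdir callers use) */
	bool any_tnl = ice_get_open_tunnel_port(hw, &port, TNL_ALL);

	/* restricted to a single encapsulation */
	bool vxlan = ice_get_open_tunnel_port(hw, &port, TNL_VXLAN);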
@@ -1956,19 +2095,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-static enum ice_status
+static int
ice_create_tunnel(struct ice_hw *hw, u16 index,
enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_tunnel_end;
}
@@ -2027,26 +2166,26 @@ ice_create_tunnel_end:
* targeting the specific updates requested and then performing an update
* package.
*/
-static enum ice_status
+static int
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
if (WARN_ON(!hw->tnl.tbl[index].valid ||
hw->tnl.tbl[index].type != type ||
hw->tnl.tbl[index].port != port)) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto ice_destroy_tunnel_end;
}
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_destroy_tunnel_end;
}
@@ -2094,7 +2233,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
@@ -2102,8 +2241,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error adding UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2118,15 +2257,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error removing UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error removing UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2142,17 +2281,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
* @prot: variable to receive the protocol ID
* @off: variable to receive the protocol offset
*/
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
struct ice_fv_word *fv_ext;
if (prof >= hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (fv_idx >= hw->blk[blk].es.fvw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
@@ -2175,11 +2314,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
* PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
*/
-static enum ice_status
+static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
return 0;
@@ -2209,21 +2348,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
-static enum ice_status
+static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
struct ice_ptg_ptype *p;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Should not happen if .in_use is set, bad config */
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
- return ICE_ERR_CFG;
+ return -EIO;
/* find the ptype within this PTG, and bypass the link over it */
p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
@@ -2256,17 +2395,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-static enum ice_status
+static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
- enum ice_status status;
u8 original_ptg;
+ int status;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
if (status)
@@ -2401,11 +2540,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
* This function will lookup the VSI entry in the XLT2 list and return
* the VSI group its associated with.
*/
-static enum ice_status
+static int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* As long as there's a default or valid VSIG associated with the input
* VSI, the function returns success. Any handling of VSIG will be
@@ -2470,7 +2609,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-static enum ice_status
+static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct list_head *chs, u16 *vsig)
{
@@ -2484,7 +2623,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2496,8 +2635,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-static enum ice_status
-ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
struct ice_vsig_vsi *vsi_cur;
@@ -2505,10 +2643,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
@@ -2557,7 +2695,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
-static enum ice_status
+static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
@@ -2566,10 +2704,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
@@ -2577,7 +2715,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
vsi_cur = (*vsi_head);
@@ -2594,7 +2732,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* verify if VSI was removed from group list */
if (!vsi_cur)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_cur->vsig = ICE_DEFAULT_VSIG;
vsi_cur->changed = 1;
@@ -2615,24 +2753,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move the entry to the new VSIG.
*/
-static enum ice_status
+static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
- enum ice_status status;
u16 orig_vsig, idx;
+ int status;
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* if VSIG not in use and VSIG is not default type, this VSIG
* doesn't exist.
*/
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
vsig != ICE_DEFAULT_VSIG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (status)
@@ -2738,7 +2876,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @masks: masks for FV
* @prof_id: receives the profile ID
*/
-static enum ice_status
+static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
@@ -2749,7 +2887,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
* field vector and mask. This will cause rule interference.
*/
if (blk == ICE_BLK_FD)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
@@ -2765,7 +2903,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2818,14 +2956,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
-static enum ice_status
+static int
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
@@ -2838,13 +2976,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
*
* This function frees an entry in a Profile ID TCAM for a specific block.
*/
-static enum ice_status
+static int
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
}
@@ -2858,15 +2996,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
* This function allocates a new profile ID, which also corresponds to a Field
* Vector (Extraction Sequence) entry.
*/
-static enum ice_status
-ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
- enum ice_status status;
u16 res_type;
u16 get_prof;
+ int status;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
if (!status)
@@ -2883,14 +3020,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
*
* This function frees a profile ID, which also corresponds to a Field Vector.
*/
-static enum ice_status
-ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
u16 tmp_prof_id = (u16)prof_id;
u16 res_type;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
}
@@ -2901,11 +3037,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
* @blk: the block containing the profile ID
* @prof_id: the profile ID for which to increment the reference count
*/
-static enum ice_status
-ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw->blk[blk].es.ref_count[prof_id]++;
@@ -3022,17 +3157,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw)
* @mask: the 16-bit mask
* @mask_idx: variable to receive the mask index
*/
-static enum ice_status
+static int
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
u16 *mask_idx)
{
bool found_unused = false, found_copy = false;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
u16 unused_idx = 0, copy_idx = 0;
+ int status = -ENOSPC;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3090,15 +3225,15 @@ err_ice_alloc_prof_mask:
* @blk: hardware block
* @mask_idx: index of mask
*/
-static enum ice_status
+static int
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!(mask_idx >= hw->blk[blk].masks.first &&
mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3132,14 +3267,14 @@ exit_ice_free_prof_mask:
* @blk: hardware block
* @prof_id: profile ID
*/
-static enum ice_status
+static int
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
u32 mask_bm;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mask_bm = hw->blk[blk].es.mask_ena[prof_id];
for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
@@ -3194,7 +3329,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
* @prof_id: profile ID
* @masks: masks
*/
-static enum ice_status
+static int
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
u16 *masks)
{
@@ -3224,7 +3359,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
if (ena_mask & BIT(i))
ice_free_prof_mask(hw, blk, i);
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
}
/* enable the masks for this profile */
@@ -3266,11 +3401,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to decrement the reference count
*/
-static enum ice_status
+static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
@@ -3708,7 +3843,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+int ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
@@ -3827,7 +3962,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
err:
ice_free_hw_tbls(hw);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -3843,7 +3978,7 @@ err:
* @nm_msk: never match mask
* @key: output of profile ID key
*/
-static enum ice_status
+static int
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3899,7 +4034,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
* @dc_msk: don't care mask
* @nm_msk: never match mask
*/
-static enum ice_status
+static int
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3907,7 +4042,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
struct ice_prof_tcam_entry;
- enum ice_status status;
+ int status;
status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
@@ -3926,7 +4061,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
* @vsig: VSIG to query
* @refs: pointer to variable to receive the reference count
*/
-static enum ice_status
+static int
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
@@ -3935,7 +4070,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
*refs = 0;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
while (ptr) {
@@ -3976,7 +4111,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -3996,7 +4131,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
sizeof(p->es[0]));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->prof_id);
@@ -4014,7 +4149,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -4030,7 +4165,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct_size(p, entry, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
@@ -4050,7 +4185,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4066,7 +4201,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->ptype);
@@ -4082,7 +4217,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4101,7 +4236,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->vsi);
@@ -4121,18 +4256,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
* @blk: hardware block
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
struct list_head *chgs)
{
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
- enum ice_status status;
u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ int status;
u16 sects;
/* count number of sections we need */
@@ -4164,7 +4299,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
/* Build update package buffer */
b = ice_pkg_buf_alloc(hw);
if (!b)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_pkg_buf_reserve_section(b, sects);
if (status)
@@ -4201,13 +4336,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
*/
pkg_sects = ice_pkg_buf_get_active_sections(b);
if (!pkg_sects || pkg_sects != sects) {
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto error_tmp;
}
/* update package */
status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
- if (status == ICE_ERR_AQ_ERROR)
+ if (status == -EIO)
ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
error_tmp:
@@ -4273,7 +4408,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
* @prof_id: profile ID
* @es: extraction sequence (length of array is determined by the block)
*/
-static enum ice_status
+static int
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
{
DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
@@ -4328,7 +4463,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
/* check for room */
if (first_free + 1 < (s8)ice_fd_pairs[index].count)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* place in extraction sequence */
for (k = 0; k < ice_fd_pairs[index].count; k++) {
@@ -4338,7 +4473,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
ice_fd_pairs[index].off + (k * 2);
if (k > first_free)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* keep track of non-relevant fields */
mask_sel |= BIT(first_free - k);
@@ -4449,7 +4584,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
* @attr: array of attributes that will be considered
* @attr_cnt: number of elements in the attribute array
*/
-static enum ice_status
+static int
ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
const struct ice_ptype_attributes *attr, u16 attr_cnt)
{
@@ -4465,11 +4600,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
&prof->attr[prof->ptg_cnt]);
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
if (!found)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
return 0;
}
@@ -4490,7 +4625,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks)
@@ -4498,9 +4633,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- enum ice_status status;
u8 byte = 0;
u8 prof_id;
+ int status;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -4538,7 +4673,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
if (!prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof;
}
@@ -4581,7 +4716,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
- if (status == ICE_ERR_MAX_LIMIT)
+ if (status == -ENOSPC)
break;
if (status) {
/* This is simply a PTYPE/PTG with no
@@ -4658,14 +4793,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* @blk: hardware block
* @idx: the index to release
*/
-static enum ice_status
-ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
/* Masks to invoke a never match entry */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status;
+ int status;
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
@@ -4685,11 +4819,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
* @blk: hardware block
* @prof: pointer to profile structure to remove
*/
-static enum ice_status
+static int
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_vsig_prof *prof)
{
- enum ice_status status;
+ int status;
u16 i;
for (i = 0; i < prof->tcam_count; i++)
@@ -4698,7 +4832,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
status = ice_rel_tcam_idx(hw, blk,
prof->tcam[i].tcam_idx);
if (status)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
}
return 0;
@@ -4711,14 +4845,14 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
* @vsig: the VSIG to remove
* @chg: the change list
*/
-static enum ice_status
+static int
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- enum ice_status status;
+ int status;
/* remove TCAM entries */
list_for_each_entry_safe(d, t,
@@ -4745,7 +4879,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
p->type = ICE_VSIG_REM;
p->orig_vsig = vsig;
@@ -4768,13 +4902,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @hdl: profile handle indicating which profile to remove
* @chg: list to receive a record of changes
*/
-static enum ice_status
+static int
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- enum ice_status status;
+ int status;
list_for_each_entry_safe(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
@@ -4792,7 +4926,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
return status;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -4801,12 +4935,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
* @blk: hardware block
* @id: profile tracking ID
*/
-static enum ice_status
-ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 i;
INIT_LIST_HEAD(&chg);
@@ -4842,16 +4975,16 @@ err_ice_rem_flow_all:
* previously created through ice_add_prof. If any existing entries
* are associated with this profile, they will be removed as well.
*/
-enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *pmap;
- enum ice_status status;
+ int status;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_rem_prof;
}
@@ -4878,20 +5011,20 @@ err_ice_rem_prof:
* @hdl: profile handle
* @chg: change list
*/
-static enum ice_status
+static int
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_get_prof;
}
@@ -4901,7 +5034,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_get_prof;
}
@@ -4933,7 +5066,7 @@ err_ice_get_prof:
*
* This routine makes a copy of the list of profiles in the specified VSIG.
*/
-static enum ice_status
+static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *lst)
{
@@ -4961,7 +5094,7 @@ err_ice_get_profs_vsig:
devm_kfree(ice_hw_to_dev(hw), ent1);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -4971,25 +5104,25 @@ err_ice_get_profs_vsig:
* @lst: the list to be added to
* @hdl: profile handle of entry to add
*/
-static enum ice_status
+static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_to_lst;
}
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_to_lst;
}
@@ -5018,17 +5151,17 @@ err_ice_add_prof_to_lst:
* @vsig: the VSIG to move the VSI to
* @chg: the change list
*/
-static enum ice_status
+static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 orig_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (!status)
@@ -5078,13 +5211,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
*
* This function appends an enable or disable TCAM entry in the change log
*/
-static enum ice_status
+static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
u16 vsig, struct ice_tcam_inf *tcam,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
+ int status;
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
@@ -5117,7 +5250,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
@@ -5151,13 +5284,13 @@ err_ice_prof_tcam_ena_dis:
* @vsig: the VSIG for which to adjust profile priorities
* @chg: the change list
*/
-static enum ice_status
+static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 idx;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -5222,7 +5355,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
-static enum ice_status
+static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
bool rev, struct list_head *chg)
{
@@ -5230,26 +5363,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
+ int status = 0;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
/* new VSIG profile structure */
t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
if (!t)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_id_vsig;
}
@@ -5264,7 +5397,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_id_vsig;
}
@@ -5334,21 +5467,21 @@ err_ice_add_prof_id_vsig:
* @hdl: the profile handle of the profile that will be added to the VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 new_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
new_vsig = ice_vsig_alloc(hw, blk);
if (!new_vsig) {
- status = ICE_ERR_HW_TABLE;
+ status = -EIO;
goto err_ice_create_prof_id_vsig;
}
@@ -5384,18 +5517,18 @@ err_ice_create_prof_id_vsig:
* @new_vsig: return of new VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct list_head *lst, u16 *new_vsig,
struct list_head *chg)
{
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 vsig;
vsig = ice_vsig_alloc(hw, blk);
if (!vsig)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
status = ice_move_vsi(hw, blk, vsi, vsig, chg);
if (status)
@@ -5425,8 +5558,8 @@ static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- enum ice_status status;
struct list_head lst;
+ int status;
INIT_LIST_HEAD(&lst);
@@ -5456,14 +5589,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head union_lst;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -5489,7 +5622,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* scenario
*/
if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto err_ice_add_prof_id_flow;
}
@@ -5597,7 +5730,7 @@ err_ice_add_prof_id_flow:
* @lst: list to remove the profile from
* @hdl: the profile handle indicating the profile to remove
*/
-static enum ice_status
+static int
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
struct ice_vsig_prof *ent, *tmp;
@@ -5609,7 +5742,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -5623,13 +5756,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head chg, copy;
- enum ice_status status;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&copy);
@@ -5724,7 +5857,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
}
}
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
/* update hardware tables */
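
The conversions in ice_flex_pipe.c above are mechanical (ICE_ERR_PARAM becomes -EINVAL, ICE_ERR_DOES_NOT_EXIST becomes -ENOENT, ICE_ERR_NO_MEMORY becomes -ENOMEM, and so on), but they let callers act on standard errnos directly instead of translating ice_status at the boundary. A minimal caller-side sketch of that effect, using a hypothetical wrapper for illustration:

/* Hypothetical caller: with int/errno returns, ice status values can
 * flow straight back to the kernel without a translation step.
 */
static int example_rem_profile(struct ice_hw *hw, u64 prof_id)
{
	int err;

	err = ice_rem_prof(hw, ICE_BLK_FD, prof_id);
	if (err == -ENOENT)	/* was ICE_ERR_DOES_NOT_EXIST */
		return 0;	/* nothing to remove; treat as success here */

	return err;		/* any other errno propagates unchanged */
}
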
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 344c2637facd..6cbc29bcb02f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,10 +18,67 @@
#define ICE_PKG_CNT 4
-enum ice_status
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic error for package-already-loaded cases; it is mapped later
+ * to one of the three more specific states below
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the one requested) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* The signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
+
+ /* An error occurred in firmware while loading the DDP package */
+ ICE_DDP_PKG_LOAD_ERROR = -11,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -12
+};
+
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
void
@@ -29,32 +86,37 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
unsigned long *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list);
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
-enum ice_status
+/* Rx parser PTYPE functions */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
+
+/* XLT2/VSI group functions */
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+bool ice_is_init_pkg_successful(enum ice_ddp_state state);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
-ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
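
The DDP states above are deliberately kept separate from errnos: ice_init_pkg() now returns an enum ice_ddp_state, and callers are expected to test it with ice_is_init_pkg_successful() rather than comparing against zero. A minimal sketch of that pattern, assuming only the declarations in this header (the log messages are illustrative, not taken from the driver):

/* Sketch of consuming the DDP state API; only ice_init_pkg() and
 * ice_is_init_pkg_successful() come from the header above, and the
 * helper decides which states (e.g. compatible already-loaded ones)
 * count as success.
 */
static void example_load_ddp(struct ice_hw *hw, u8 *buf, u32 len)
{
	enum ice_ddp_state state = ice_init_pkg(hw, buf, len);

	if (ice_is_init_pkg_successful(state))
		return;

	if (state == ICE_DDP_PKG_FW_MISMATCH)
		dev_err(ice_hw_to_dev(hw),
			"DDP package is not compatible with the firmware\n");
	else
		dev_err(ice_hw_to_dev(hw),
			"DDP package load failed, state %d\n", state);
}
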
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0f572a36d021..fc087e0b5292 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -160,6 +160,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_TXPARSER_BOOST_TCAM 66
@@ -201,6 +202,24 @@ enum ice_sect {
ICE_SECT_COUNT
};
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY 1
+#define ICE_PTYPE_IPV4_PAY 23
+#define ICE_PTYPE_IPV4_UDP_PAY 24
+#define ICE_PTYPE_IPV4_TCP_PAY 26
+#define ICE_PTYPE_IPV4_SCTP_PAY 27
+#define ICE_PTYPE_IPV6_PAY 89
+#define ICE_PTYPE_IPV6_UDP_PAY 90
+#define ICE_PTYPE_IPV6_TCP_PAY 92
+#define ICE_PTYPE_IPV6_SCTP_PAY 93
+#define ICE_MAC_IPV4_ESP 160
+#define ICE_MAC_IPV6_ESP 161
+#define ICE_MAC_IPV4_AH 162
+#define ICE_MAC_IPV6_AH 163
+#define ICE_MAC_IPV4_NAT_T_ESP 164
+#define ICE_MAC_IPV6_NAT_T_ESP 165
+#define ICE_MAC_IPV4_GTPU 329
+#define ICE_MAC_IPV6_GTPU 330
#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
@@ -221,6 +240,10 @@ enum ice_sect {
#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
+#define ICE_MAC_IPV4_PFCP_SESSION 352
+#define ICE_MAC_IPV6_PFCP_SESSION 354
+#define ICE_MAC_IPV4_L2TPV3 360
+#define ICE_MAC_IPV6_L2TPV3 361
/* Attributes that can modify PTYPE definitions.
*
@@ -329,6 +352,25 @@ struct ice_boost_tcam_section {
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
+/* package Marker Ptype TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
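
ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF follows the driver's flex-array sizing idiom: struct_size(section, tcam, 1) minus one entry gives the fixed section header, and ICE_MAX_ENTRIES_IN_BUF (defined earlier in this header) divides the remaining buffer space by the per-entry size. The same arithmetic written out as a standalone sketch, with an assumed buffer size that is not a driver constant:

/* Sketch of the sizing arithmetic behind the macro above; the
 * 4096-byte buffer is an assumption for illustration only.
 */
#include <linux/overflow.h>	/* struct_size() */

#define EXAMPLE_SECT_BUF_SZ	4096

static size_t example_max_marker_ptype_tcams(void)
{
	/* header bytes = one-entry section size minus one entry */
	size_t hdr = struct_size((struct ice_marker_ptype_tcam_section *)0,
				 tcam, 1) -
		     sizeof(struct ice_marker_ptype_tcam_entry);

	return (EXAMPLE_SECT_BUF_SZ - hdr) /
	       sizeof(struct ice_marker_ptype_tcam_entry);
}
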
struct ice_xlt1_section {
__le16 count;
__le16 offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index f160672448a0..2c5332953679 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -609,8 +609,6 @@ struct ice_flow_prof_params {
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
ICE_FLOW_SEG_HDR_NAT_T_ESP)
-#define ICE_FLOW_SEG_HDRS_L2_MASK \
- (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
@@ -625,8 +623,7 @@ struct ice_flow_prof_params {
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
*/
-static enum ice_status
-ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
u8 i;
@@ -634,12 +631,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
@@ -700,8 +697,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
* This function identifies the packet types associated with the protocol
* headers present in the packet segments of the specified flow profile.
*/
-static enum ice_status
-ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof;
u8 i;
@@ -898,7 +894,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* field. It then allocates one or more extraction sequence entries for the
* given field, and fills the entries with protocol ID and offset information.
*/
-static enum ice_status
+static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld, u64 match)
{
@@ -1035,7 +1031,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = ICE_PROT_GRE_OF;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
/* Each extraction sequence entry is a word in size, and extracts a
@@ -1073,7 +1069,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* does not exceed the block's capability
*/
if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1099,7 +1095,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* @params: information about the flow to be processed
* @seg: index of packet segment whose raw fields are to be extracted
*/
-static enum ice_status
+static int
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg)
{
@@ -1112,12 +1108,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
if (params->prof->segs[seg].raws_cnt >
ARRAY_SIZE(params->prof->segs[seg].raws))
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* Offsets within the segment headers are not supported */
hdrs_sz = ice_flow_calc_seg_sz(params, seg);
if (!hdrs_sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_words = hw->blk[params->blk].es.fvw;
@@ -1150,7 +1146,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (params->es_cnt >= hw->blk[params->blk].es.count ||
params->es_cnt >= ICE_MAX_FV_WORDS)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1176,12 +1172,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
* This function iterates through all matched fields in the given segments, and
* creates an extraction sequence for the fields.
*/
-static enum ice_status
+static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof = params->prof;
- enum ice_status status = 0;
+ int status = 0;
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
@@ -1210,10 +1206,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
*/
-static enum ice_status
+static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status;
+ int status;
status = ice_flow_proc_seg_hdrs(params);
if (status)
@@ -1229,7 +1225,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
status = 0;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
return status;
@@ -1329,12 +1325,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
* @blk: classification stage
* @entry: flow entry to be removed
*/
-static enum ice_status
+static int
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
struct ice_flow_entry *entry)
{
if (!entry)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
list_del(&entry->l_entry);
@@ -1355,27 +1351,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
- enum ice_status status;
+ int status;
u8 i;
if (!prof)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
GFP_KERNEL);
if (!params->prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto free_params;
}
@@ -1432,11 +1428,11 @@ free_params:
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status;
+ int status;
/* Remove all remaining flow entries before removing the flow profile */
if (!list_empty(&prof->entries)) {
@@ -1474,11 +1470,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (!test_bit(vsi_handle, prof->vsis)) {
status = ice_add_prof_id_flow(hw, blk,
@@ -1505,11 +1501,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (test_bit(vsi_handle, prof->vsis)) {
status = ice_rem_prof_id_flow(hw, blk,
@@ -1536,21 +1532,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
- enum ice_status status;
+ int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
if (!segs_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!segs)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
status = ice_flow_val_hdrs(segs, segs_cnt);
if (status)
@@ -1574,17 +1570,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -1608,34 +1603,34 @@ out:
* @data: pointer to a data buffer containing flow entry's match values/masks
* @entry_h: pointer to buffer that receives the new flow entry's handle
*/
-enum ice_status
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
void *data, u64 *entry_h)
{
struct ice_flow_entry *e = NULL;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
/* No flow entry data is expected for RSS */
if (!entry_h || (!data && blk != ICE_BLK_RSS))
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
} else {
/* Allocate memory for the entry being added and associate
* the VSI to the found flow profile
*/
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
if (!e)
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
else
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
}
@@ -1654,7 +1649,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
case ICE_BLK_RSS:
break;
default:
- status = ICE_ERR_NOT_IMPL;
+ status = -EOPNOTSUPP;
goto out;
}
@@ -1680,15 +1675,14 @@ out:
* @blk: classification stage
* @entry_h: handle to the flow entry to be removed
*/
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
- u64 entry_h)
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
{
struct ice_flow_entry *entry;
struct ice_flow_prof *prof;
- enum ice_status status = 0;
+ int status = 0;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
- return ICE_ERR_PARAM;
+ return -EINVAL;
entry = ICE_FLOW_ENTRY_PTR(entry_h);
@@ -1836,7 +1830,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
-static enum ice_status
+static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
@@ -1853,15 +1847,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
- return ICE_ERR_PARAM;
+ return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
@@ -1899,14 +1893,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
* the VSI from that profile. If the flow profile has no VSIs it will
* be removed.
*/
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (list_empty(&hw->fl_profs[blk]))
return 0;
@@ -1966,7 +1960,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
struct ice_rss_cfg *r, *rss_cfg;
@@ -1981,7 +1975,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
GFP_KERNEL);
if (!rss_cfg)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
@@ -2022,21 +2016,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status;
+ int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2128,15 +2122,15 @@ exit:
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
-enum ice_status
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2159,18 +2153,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2182,7 +2176,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -2216,15 +2210,15 @@ out:
* removed. Calls are made to underlying flow APIs which will in
* turn build or update buffers for RSS XLT1 section.
*/
-enum ice_status __maybe_unused
+int __maybe_unused
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2279,20 +2273,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
- enum ice_status status = 0;
+ int status = 0;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Make sure no unsupported bits are specified */
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
- return ICE_ERR_CFG;
+ return -EIO;
hash_flds = avf_hash;
@@ -2352,7 +2345,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
}
if (rss_hash == ICE_HASH_INVALID)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
@@ -2368,13 +2361,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
*/
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status = 0;
struct ice_rss_cfg *r;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2a2d8c1536cb..d8782b28323e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -383,18 +383,16 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof);
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
-enum ice_status
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
-enum ice_status
-ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
@@ -402,14 +400,13 @@ void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
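
Taken together, the prototypes above form a small add/remove/replay API for RSS configurations, now returning errnos. A hedged usage sketch (ICE_FLOW_HASH_IPV4 is assumed to be the usual source/destination-address field mask defined elsewhere in this header):

/* Illustrative RSS configuration flow; ICE_FLOW_HASH_IPV4 is assumed
 * from elsewhere in ice_flow.h and error handling is abbreviated.
 */
static int example_cfg_ipv4_rss(struct ice_hw *hw, u16 vsi_handle)
{
	int err;

	/* hash on the IPv4 source/destination addresses */
	err = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
			      ICE_FLOW_SEG_HDR_IPV4);
	if (err)
		return err;	/* -EINVAL, -ENOMEM, ... as converted above */

	/* after a reset, the stored RSS list can be replayed wholesale */
	return ice_replay_rss_cfg(hw, vsi_handle);
}
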
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c2e78eaf4ccb..94903b450345 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -47,12 +47,69 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
}
/**
+ * ice_fltr_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Set VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+}
+
+/**
+ * ice_fltr_clear_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Clear VSI with all associated VLANs from the given promiscuous mode(s)
+ */
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+}
+
+/**
+ * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+}
+
+/**
+ * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+}
+
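
The four wrappers above only forward to the ice_switch implementations; their point is that ice_fltr users can stay within a single filtering API. A minimal sketch of calling one of them (ICE_MCAST_PROMISC_BITS is assumed to be one of the driver's promiscuous-mode bitmasks, and VLAN ID 0 stands for untagged traffic):

/* Illustrative wrapper use; ICE_MCAST_PROMISC_BITS is an assumed
 * promiscuous-mode bitmask, not defined in this file.
 */
static int example_enable_mcast_promisc(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;

	return ice_fltr_set_vsi_promisc(hw, vsi->idx,
					ICE_MCAST_PROMISC_BITS, 0);
}
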
+/**
* ice_fltr_add_mac_list - add list of MAC filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_mac(&vsi->back->hw, list);
}
@@ -62,8 +119,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_mac(&vsi->back->hw, list);
}
@@ -73,8 +129,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_vlan(&vsi->back->hw, list);
}
@@ -84,7 +139,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
+static int
ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_vlan(&vsi->back->hw, list);
@@ -95,8 +150,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_eth_mac(&vsi->back->hw, list);
}
@@ -106,8 +160,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_eth_mac(&vsi->back->hw, list);
}
@@ -207,18 +260,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status (*mac_action)(struct ice_vsi *,
- struct list_head *))
+ int (*mac_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -233,21 +285,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status(*mac_action)
+ int(*mac_action)
(struct ice_vsi *, struct list_head *))
{
u8 broadcast[ETH_ALEN];
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
eth_broadcast_addr(broadcast);
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -262,17 +314,16 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @vlan_action: pointer to add or remove VLAN function
*/
-static enum ice_status
+static int
ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
enum ice_sw_fwd_act_type action,
- enum ice_status (*vlan_action)(struct ice_vsi *,
- struct list_head *))
+ int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -287,17 +338,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @action: action to be performed on filter match
* @eth_action: pointer to add or remove ethertype function
*/
-static enum ice_status
+static int
ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action,
- enum ice_status (*eth_action)(struct ice_vsi *,
- struct list_head *))
+ int (*eth_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -310,8 +360,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
}
@@ -322,7 +372,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
@@ -336,8 +386,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @mac: filter MAC to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
}
@@ -348,8 +398,8 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
* @vlan_id: VLAN ID to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_add_vlan_list);
@@ -361,8 +411,8 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @vlan_id: filter VLAN to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_remove_vlan_list);
@@ -375,8 +425,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @flag: direction of packet to be filtered, Tx or Rx
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_add_eth_list);
@@ -389,8 +439,8 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @flag: direction of filter
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
- u16 flag, enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
@@ -406,17 +456,17 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
* @src: source VSI
* @new_flags: combinations of lb_en and lan_en
*/
-static enum ice_status
+static int
ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
u32 act, u16 type, u16 src, u32 new_flags)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status err;
u32 flags_mask;
+ int err;
s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
act &= ~flags_mask;
@@ -465,7 +515,7 @@ static u32 ice_fltr_build_action(u16 vsi_id)
* Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
* ICE_SINGLE_ACT_LAN_ENABLE.
*/
-enum ice_status
+int
ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
u32 new_flags)
{
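The hunks above all apply the same mechanical conversion: helpers that build a temporary filter list and previously returned enum ice_status now return a plain int errno. A minimal sketch of that shape, assuming the list helpers keep the signatures shown in this patch (the _example names are hypothetical, not code from this series):

    static int ice_fltr_prepare_example(struct ice_vsi *vsi,
                                        int (*list_action)(struct ice_vsi *,
                                                           struct list_head *))
    {
            LIST_HEAD(tmp_list);
            int result;

            /* -ENOMEM replaces ICE_ERR_NO_MEMORY on allocation failure */
            if (ice_fltr_add_example_to_list(vsi, &tmp_list))
                    return -ENOMEM;

            result = list_action(vsi, &tmp_list);
            ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
            return result; /* 0 on success, negative errno on failure */
    }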
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 8eec4febead1..0e65fd021ad2 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -5,38 +5,52 @@
#define _ICE_FLTR_H_
void ice_fltr_free_list(struct device *dev, struct list_head *h);
-enum ice_status
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int
ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int
ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
-enum ice_status
+
+int
+ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id,
+ u32 new_flags);
+int
ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
u32 new_flags);
#endif
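With the header now exporting int-returning prototypes, callers compare against standard errnos rather than ice_status values. A hypothetical caller, shown only to illustrate the new contract:

    static int example_add_filter(struct ice_vsi *vsi, const u8 *mac)
    {
            int err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);

            /* -EEXIST replaces ICE_ERR_ALREADY_EXISTS and can be benign */
            if (err == -EEXIST)
                    return 0;

            return err; /* 0 or a negative errno such as -ENOMEM */
    }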
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index f8601d5b0b19..ccdfd2f030d8 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -40,8 +40,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 *package_data;
+ int status;
dev_dbg(dev, "Sending PLDM record package data to firmware\n");
@@ -54,9 +54,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
kfree(package_data);
if (status) {
- dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to send record package data to firmware");
return -EIO;
}
@@ -203,8 +202,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
size_t length;
+ int status;
switch (component->identifier) {
case NVM_COMP_ID_OROM:
@@ -240,9 +239,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
kfree(comp_tbl);
if (status) {
- dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -277,7 +275,6 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u32 completion_offset;
int err;
@@ -286,11 +283,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset);
- status = ice_aq_update_nvm(hw, module, offset, block_size, block,
- last_cmd, 0, NULL);
- if (status) {
- dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
- module, block_size, offset, ice_stat_str(status),
+ err = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (err) {
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
+ module, block_size, offset, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -445,7 +442,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct devlink *devlink;
- enum ice_status status;
int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
@@ -456,10 +452,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
- status = ice_aq_erase_nvm(hw, module, NULL);
- if (status) {
- dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
- component, module, ice_stat_str(status),
+ err = ice_aq_erase_nvm(hw, module, NULL);
+ if (err) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
+ component, module, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
@@ -524,17 +520,15 @@ static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u16 completion_retval;
int err;
memset(&event, 0, sizeof(event));
- status = ice_nvm_write_activate(hw, activate_flags);
- if (status) {
- dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_nvm_write_activate(hw, activate_flags);
+ if (err) {
+ dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
@@ -670,7 +664,6 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_fwu_priv priv;
- enum ice_status status;
int err;
switch (preservation) {
@@ -692,13 +685,12 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
priv.pf = pf;
priv.activate_flags = preservation;
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
+ return err;
}
err = pldmfw_flash_image(&priv.context, fw);
@@ -736,7 +728,6 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw_dev_caps *dev_caps;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 pending = 0;
int err;
@@ -749,11 +740,11 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
* may have changed, e.g. if an update was previously completed and
* the system has not yet rebooted.
*/
- status = ice_discover_dev_caps(hw, dev_caps);
- if (status) {
+ err = ice_discover_dev_caps(hw, dev_caps);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
kfree(dev_caps);
- return -EIO;
+ return err;
}
if (dev_caps->common_cap.nvm_update_pending_nvm) {
@@ -798,13 +789,12 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
"Canceling previous pending update",
component, 0, 0);
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
+ return err;
}
pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
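Every hunk in this file follows the same logging convention: print the errno with %d alongside the admin queue status decoded by ice_aq_str(), then propagate the errno itself rather than flattening it to -EIO where the patch chooses to. A condensed sketch of the pattern (example_aq_op() is a hypothetical AQ wrapper):

    static int example_aq_op_log(struct ice_hw *hw, struct device *dev)
    {
            int err = example_aq_op(hw); /* hypothetical AQ wrapper */

            if (err) {
                    dev_err(dev, "operation failed, err %d aq_err %s\n",
                            err, ice_aq_str(hw->adminq.sq_last_status));
                    return err;
            }
            return 0;
    }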
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 09a3297cd63c..1999b12708de 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -291,7 +291,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
+ int status;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -305,8 +305,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+ vsi->vsi_num, status);
kfree(ctxt);
}
@@ -709,15 +709,15 @@ bool ice_is_aux_ena(struct ice_pf *pf)
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
+ int status;
if (ice_is_safe_mode(pf))
return;
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -1545,8 +1545,8 @@ ice_vsi_cfg_rss_exit:
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1557,8 +1557,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -1577,8 +1577,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1590,57 +1590,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
@@ -1749,22 +1749,21 @@ ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
- int err = 0;
+ int err;
dev = ice_pf_to_dev(pf);
- status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!status) {
+ err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ if (!err) {
vsi->num_vlan--;
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
+ } else if (err == -ENOENT) {
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
+ vid, vsi->vsi_num, err);
+ err = 0;
} else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
+ vid, vsi->vsi_num, err);
}
return err;
@@ -2105,8 +2104,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -2124,12 +2122,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -2148,8 +2144,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
/* do not allow modifying VLAN stripping when a port VLAN is configured
* on this VSI
@@ -2177,12 +2172,10 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
- ena, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -2324,10 +2317,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- ice_stat_str(status),
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ status, ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -2405,11 +2397,11 @@ clear_reg_idx:
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
- enum ice_sw_fwd_act_type act);
+ int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
@@ -2428,9 +2420,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
+ dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, ice_stat_str(status));
+ vsi->vsi_num, status);
}
/**
@@ -2450,7 +2442,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
struct ice_port_info *port_info;
struct ice_pf *pf = vsi->back;
u32 agg_node_id_start = 0;
- enum ice_status status;
+ int status;
/* create (as needed) scheduler aggregator node and move VSI into
* corresponding aggregator node
@@ -2576,7 +2568,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2725,11 +2716,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto unroll_clear_rings;
}
@@ -3036,8 +3027,8 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
- enum ice_status err;
struct ice_pf *pf;
+ int err;
if (!vsi->back)
return -ENODEV;
@@ -3273,7 +3264,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_vsi_type vtype;
- enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -3421,14 +3411,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
/* If MQPRIO is set, we are on the channel code path; hence, for the
* main VSI, configure with a single enabled TC (pass 1)
*/
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
if (init_vsi) {
ret = -EIO;
goto err_vectors;
@@ -3663,7 +3653,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctx;
- enum ice_status status;
struct device *dev;
int i, ret = 0;
u8 num_tc = 0;
@@ -3705,25 +3694,22 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
+ ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (ret) {
dev_info(dev, "Failed VSI Update\n");
- ret = -EIO;
goto out;
}
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
- max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed TC config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- ret = -EIO;
+ if (ret) {
+ dev_err(dev, "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, ret);
goto out;
}
ice_vsi_update_q_map(vsi, ctx);
@@ -3776,39 +3762,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
}
/**
- * ice_status_to_errno - convert from enum ice_status to Linux errno
- * @err: ice_status value to convert
- */
-int ice_status_to_errno(enum ice_status err)
-{
- switch (err) {
- case ICE_SUCCESS:
- return 0;
- case ICE_ERR_DOES_NOT_EXIST:
- return -ENOENT;
- case ICE_ERR_OUT_OF_RANGE:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_EMPTY:
- case ICE_ERR_AQ_FW_CRITICAL:
- return -EIO;
- case ICE_ERR_PARAM:
- case ICE_ERR_INVAL_SIZE:
- return -EINVAL;
- case ICE_ERR_NO_MEMORY:
- return -ENOMEM;
- case ICE_ERR_MAX_LIMIT:
- return -EAGAIN;
- case ICE_ERR_RESET_ONGOING:
- return -EBUSY;
- case ICE_ERR_AQ_FULL:
- return -ENOSPC;
- default:
- return -EINVAL;
- }
-}
-
-/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free
*
@@ -3849,8 +3802,8 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
*/
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
- enum ice_status status;
struct device *dev;
+ int status;
if (!sw || !vsi)
return -EINVAL;
@@ -3873,9 +3826,9 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EIO;
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return status;
}
sw->dflt_vsi = vsi;
@@ -3895,8 +3848,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
struct ice_vsi *dflt_vsi;
- enum ice_status status;
struct device *dev;
+ int status;
if (!sw)
return -EINVAL;
@@ -3912,8 +3865,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
- dflt_vsi->vsi_num, ice_stat_str(status));
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
+ dflt_vsi->vsi_num, status);
return -EIO;
}
@@ -3987,8 +3940,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi)
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4014,7 +3967,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
min_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
@@ -4026,7 +3979,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
if (status) {
dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
@@ -4048,8 +4001,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4075,7 +4028,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
max_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
@@ -4087,7 +4040,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
if (status) {
dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
@@ -4107,7 +4060,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_port_info *pi = vsi->port_info;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
@@ -4119,16 +4072,16 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
* a success code. Return an error if FW returns an error code other
* than ICE_AQ_RC_EMODE
*/
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
- dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
+ return status;
}
return 0;
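The ice_set_link() hunk above also shows how AQ-level failures look after the conversion: the generic admin queue error that used to be ICE_ERR_AQ_ERROR now arrives as -EIO, and ICE_AQ_RC_EMODE is still tolerated. A sketch of just that decision, assuming status came from an AQ call such as ice_aq_set_link_restart_an():

    static int example_link_status(struct ice_hw *hw, int status)
    {
            /* generic AQ failure is now -EIO (was ICE_ERR_AQ_ERROR) */
            if (status == -EIO &&
                hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
                    return 0; /* link already in requested state; not fatal */

            return status; /* 0 or a negative errno */
    }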
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6c803407b67d..b2ed189527d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -103,15 +103,11 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
-
-int ice_status_to_errno(enum ice_status err);
-
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_aux_ena(struct ice_pf *pf);
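For reference, the errno mapping that the deleted ice_status_to_errno() helper used to apply, reconstructed from the removal hunk in ice_lib.c above; after this patch the callees are expected to return these values directly:

    /* Mapping previously applied by the deleted helper:
     *   ICE_ERR_DOES_NOT_EXIST                          -> -ENOENT
     *   ICE_ERR_OUT_OF_RANGE, ICE_ERR_AQ_ERROR,
     *   ICE_ERR_AQ_TIMEOUT, ICE_ERR_AQ_EMPTY,
     *   ICE_ERR_AQ_FW_CRITICAL                          -> -EIO
     *   ICE_ERR_PARAM, ICE_ERR_INVAL_SIZE               -> -EINVAL
     *   ICE_ERR_NO_MEMORY                               -> -ENOMEM
     *   ICE_ERR_MAX_LIMIT                               -> -EAGAIN
     *   ICE_ERR_RESET_ONGOING                           -> -EBUSY
     *   ICE_ERR_AQ_FULL                                 -> -ENOSPC
     *   anything else                                   -> -EINVAL
     */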
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 515ac8c4e891..0c425d24889f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -155,7 +155,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- enum ice_status status;
struct ice_vsi *vsi;
u8 *perm_addr;
@@ -164,11 +163,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
return -EINVAL;
perm_addr = vsi->port_info->mac.perm_addr;
- status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
- if (status)
- return -EIO;
-
- return 0;
+ return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}
/**
@@ -237,36 +232,43 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
- * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * ice_set_promisc - Enable promiscuous mode for a given PF
* @vsi: the VSI being configured
* @promisc_m: mask of promiscuous config bits
- * @set_promisc: enable or disable promisc flag request
*
*/
-static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status = 0;
+ int status;
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- set_promisc);
- } else {
- if (set_promisc)
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- }
+ if (vsi->num_vlan > 1)
+ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ return status;
+}
- if (status)
- return -EIO;
+/**
+ * ice_clear_promisc - Disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ *
+ */
+static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
+{
+ int status;
- return 0;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->num_vlan > 1)
+ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ return status;
}
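ice_cfg_promisc()'s boolean set_promisc parameter is split into two single-purpose helpers, so call sites read as intent (see the IFF_ALLMULTI hunks below). A hypothetical toggle built on the pair, shown only to illustrate the split:

    static int example_toggle_promisc(struct ice_vsi *vsi, u8 mask, bool on)
    {
            return on ? ice_set_promisc(vsi, mask)
                      : ice_clear_promisc(vsi, mask);
    }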
/**
@@ -282,10 +284,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status = 0;
u32 changed_flags = 0;
u8 promisc_m;
- int err = 0;
+ int err;
if (!vsi->netdev)
return -EINVAL;
@@ -315,25 +316,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
- if (status) {
+ if (err) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
- if (status == ICE_ERR_NO_MEMORY) {
- err = -ENOMEM;
+ if (err == -ENOMEM)
goto out;
- }
}
/* Add MAC addresses in the sync list */
- status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If the filter is added successfully or already exists, do not
 * enter the 'if' block below and report it as an error. Instead,
 * continue processing the rest of the function.
 */
- if (status && status != ICE_ERR_ALREADY_EXISTS) {
+ if (err && err != -EEXIST) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
@@ -346,10 +345,10 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
- err = -EIO;
goto out;
}
}
+ err = 0;
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
@@ -358,7 +357,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
else
promisc_m = ICE_MCAST_PROMISC_BITS;
- err = ice_cfg_promisc(vsi, promisc_m, true);
+ err = ice_set_promisc(vsi, promisc_m);
if (err) {
netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
@@ -372,7 +371,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
else
promisc_m = ICE_MCAST_PROMISC_BITS;
- err = ice_cfg_promisc(vsi, promisc_m, false);
+ err = ice_clear_promisc(vsi, promisc_m);
if (err) {
netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
@@ -396,6 +395,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ err = 0;
ice_cfg_vlan_pruning(vsi, false);
}
} else {
@@ -695,12 +695,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
const char *an_advertised;
- enum ice_status status;
const char *fec_req;
const char *speed;
const char *fec;
const char *fc;
const char *an;
+ int status;
if (!vsi)
return;
@@ -1020,10 +1020,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
- enum ice_status status;
struct ice_vsi *vsi;
u16 old_link_speed;
bool old_link;
+ int status;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
@@ -1036,8 +1036,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
status = ice_update_link_info(pi);
if (status)
- dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
- pi->lport, ice_stat_str(status),
+ dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
+ pi->lport, status,
ice_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1407,15 +1407,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
return 0;
do {
- enum ice_status ret;
u16 opcode;
+ int ret;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
- if (ret == ICE_ERR_AQ_NO_WORK)
+ if (ret == -EALREADY)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %s\n", qtype,
- ice_stat_str(ret));
+ dev_err(dev, "%s Receive Queue event error %d\n", qtype,
+ ret);
break;
}
@@ -1866,19 +1866,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
- NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
+ pcaps, NULL);
- if (status) {
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto out;
}
@@ -1977,8 +1975,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EIO;
@@ -1988,14 +1985,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
return -ENOMEM;
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto err_out;
}
@@ -2049,8 +2045,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
struct ice_aqc_set_phy_cfg_data *cfg;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = vsi->back;
- enum ice_status status;
- int err = 0;
+ int err;
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
@@ -2070,12 +2065,11 @@ static int ice_configure_phy(struct ice_vsi *vsi)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2089,15 +2083,14 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Use PHY topology as baseline for configuration */
memset(pcaps, 0, sizeof(*pcaps));
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2150,12 +2143,10 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Enable link and link update */
cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
- status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
- if (status) {
- dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
- }
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
+ if (err)
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, err);
kfree(cfg);
done:
@@ -2549,9 +2540,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.vsi_map_offset = vsi->alloc_txq,
.mapping_mode = ICE_VSI_MAP_CONTIG
};
- enum ice_status status;
struct device *dev;
int i, v_idx;
+ int status;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2605,8 +2596,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
- ice_stat_str(status));
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
+ status);
goto clear_xdp_rings;
}
@@ -3520,7 +3511,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi;
- int status = 0;
+ int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
@@ -3533,10 +3524,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
INIT_LIST_HEAD(&vsi->ch_list);
status = ice_cfg_netdev(vsi);
- if (status) {
- status = -ENODEV;
+ if (status)
goto unroll_vsi_setup;
- }
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size(vsi);
@@ -3560,14 +3549,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (status) {
dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
vsi->vsi_num, status);
- status = -EINVAL;
goto unroll_napi_add;
}
status = ice_init_mac_fltr(pf);
if (status)
goto free_cpu_rx_map;
- return status;
+ return 0;
free_cpu_rx_map:
ice_free_cpu_rx_rmap(vsi);
@@ -4023,8 +4011,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!vsi)
return;
@@ -4054,9 +4042,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4069,109 +4056,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
- * @status: status of package load
+ * @state: state of package load
*/
-static void
-ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
+static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
- struct ice_pf *pf = (struct ice_pf *)hw->back;
- struct device *dev = ice_pf_to_dev(pf);
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
- switch (*status) {
- case ICE_SUCCESS:
- /* The package download AdminQ command returned success because
- * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
- * already a package loaded on the device.
- */
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name,
- sizeof(hw->pkg_name))) {
- if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- else
- dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
- hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
- *status = ICE_ERR_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft,
- hw->pkg_name,
- hw->pkg_ver.major,
- hw->pkg_ver.minor,
- hw->pkg_ver.update,
- hw->pkg_ver.draft);
- } else {
- dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
- *status = ICE_ERR_NOT_SUPPORTED;
- }
+ dev = ice_pf_to_dev(pf);
+
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
break;
- case ICE_ERR_FW_DDP_MISMATCH:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft,
+ hw->pkg_name,
+ hw->pkg_ver.major,
+ hw->pkg_ver.minor,
+ hw->pkg_ver.update,
+ hw->pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_FW_MISMATCH:
dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
break;
- case ICE_ERR_BUF_TOO_SHORT:
- case ICE_ERR_CFG:
+ case ICE_DDP_PKG_INVALID_FILE:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
- case ICE_ERR_NOT_SUPPORTED:
- /* Package File version not supported */
- if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
- else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
break;
- case ICE_ERR_AQ_ERROR:
- switch (hw->pkg_dwnld_status) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_ESVN:
- dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_LOAD_ERROR:
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
/* poll for reset to complete */
if (ice_check_reset(hw))
dev_err(dev, "Error resetting device. Please reload the driver\n");
- return;
- default:
- break;
- }
- fallthrough;
- default:
- dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
- *status);
break;
+ case ICE_DDP_PKG_ERR:
+ default:
+ dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
}
}
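The rewritten ice_log_pkg_init() consumes a flat enum ice_ddp_state instead of an enum ice_status plus nested admin queue decoding. A hypothetical caller, mirroring the ice_load_pkg() hunk that follows in this patch:

    static void example_load_pkg(struct ice_pf *pf, struct ice_hw *hw)
    {
            enum ice_ddp_state state;

            state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
            ice_log_pkg_init(hw, state); /* one flat switch, no AQ decoding */
            if (!ice_is_init_pkg_successful(state))
                    clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); /* Safe Mode */
    }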
@@ -4186,24 +4144,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
- enum ice_status status = ICE_ERR_PARAM;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
if (firmware && !hw->pkg_copy) {
- status = ice_copy_and_init_pkg(hw, firmware->data,
- firmware->size);
- ice_log_pkg_init(hw, &status);
+ state = ice_copy_and_init_pkg(hw, firmware->data,
+ firmware->size);
+ ice_log_pkg_init(hw, state);
} else if (!firmware && hw->pkg_copy) {
/* Reload package during rebuild after CORER/GLOBR reset */
- status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
- ice_log_pkg_init(hw, &status);
+ state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ ice_log_pkg_init(hw, state);
} else {
dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
- if (status) {
+ if (!ice_is_init_pkg_successful(state)) {
/* Safe Mode */
clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return;
@@ -4234,9 +4192,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
* ice_send_version - update firmware with driver version
* @pf: PF struct
*
- * Returns ICE_SUCCESS on success, else error code
+ * Returns 0 on success, else error code
*/
-static enum ice_status ice_send_version(struct ice_pf *pf)
+static int ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
@@ -4526,7 +4484,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -4809,9 +4766,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 mac_addr[ETH_ALEN];
struct ice_vsi *vsi;
+ int status;
u8 flags;
if (!pf->wol_ena)
@@ -4833,9 +4790,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
- dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -5374,11 +5330,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
- enum ice_status status;
u8 old_mac[ETH_ALEN];
u8 flags = 0;
- int err = 0;
u8 *mac;
+ int err;
mac = (u8 *)addr->sa_data;
@@ -5410,22 +5365,22 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
- status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
+ if (err && err != -ENOENT) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
/* Add filter for new MAC. If filter exists, return success */
- status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS)
+ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (err == -EEXIST)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- else if (status)
+ else if (err)
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
@@ -5444,10 +5399,10 @@ err_update_filters:
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
- status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
- if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
- mac, ice_stat_str(status));
+ err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
+ if (err) {
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, err);
}
return 0;
}
@@ -5489,8 +5444,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
u16 q_handle;
+ int status;
u8 tc;
/* Validate maxrate requested is within permitted range */
@@ -5510,13 +5465,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
else
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
- if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
- ice_stat_str(status));
- return -EIO;
- }
+ if (status)
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
- return 0;
+ return status;
}
/**
@@ -5888,6 +5841,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
+ /* clear this now, and the first stats read will be used as baseline */
+ vsi->stat_offsets_loaded = false;
+
ice_service_task_schedule(pf);
return 0;
@@ -5934,14 +5890,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st
/**
* ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
* @vsi: the VSI to be updated
+ * @vsi_stats: the stats struct to be updated
* @rings: rings to work on
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
- u16 count)
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+ struct rtnl_link_stats64 *vsi_stats,
+ struct ice_tx_ring **rings, u16 count)
{
- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
@@ -5965,15 +5922,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
- /* reset netdev stats */
- vsi_stats->tx_packets = 0;
- vsi_stats->tx_bytes = 0;
- vsi_stats->rx_packets = 0;
- vsi_stats->rx_bytes = 0;
+ vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
+ if (!vsi_stats)
+ return;
/* reset non-netdev (extended) stats */
vsi->tx_restart = 0;
@@ -5985,7 +5940,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
+ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+ vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -6000,10 +5956,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update XDP Tx rings counters */
if (ice_is_xdp_ena_vsi(vsi))
- ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
vsi->num_xdp_txq);
rcu_read_unlock();
+
+ vsi->net_stats.tx_packets = vsi_stats->tx_packets;
+ vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
+ vsi->net_stats.rx_packets = vsi_stats->rx_packets;
+ vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+
+ kfree(vsi_stats);
}
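The rework above accumulates the ring totals into a freshly allocated scratch struct and copies them into vsi->net_stats only once the sums are complete, so a concurrent reader can no longer observe the shared counters zeroed mid-update. A standalone sketch of the accumulate-then-publish shape, with made-up names and types:

#include <stdio.h>
#include <stdlib.h>

struct demo_stats { unsigned long tx_packets, tx_bytes; };

static struct demo_stats published;	/* analogous to vsi->net_stats */

static void update_stats(const unsigned long *pkts,
			 const unsigned long *bytes, int count)
{
	struct demo_stats *scratch = calloc(1, sizeof(*scratch));
	int i;

	if (!scratch)
		return;
	for (i = 0; i < count; i++) {	/* accumulate privately */
		scratch->tx_packets += pkts[i];
		scratch->tx_bytes += bytes[i];
	}
	published = *scratch;		/* publish only finished totals */
	free(scratch);
}

int main(void)
{
	unsigned long pkts[] = { 1, 2 }, bytes[] = { 100, 200 };

	update_stats(pkts, bytes, 2);
	printf("%lu packets, %lu bytes\n",
	       published.tx_packets, published.tx_bytes);
	return 0;
}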
/**
@@ -6540,7 +6503,6 @@ static void ice_vsi_release_all(struct ice_pf *pf)
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int i, err;
ice_for_each_vsi(pf, i) {
@@ -6558,12 +6520,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
}
/* replay filters for the VSI */
- status = ice_replay_vsi(&pf->hw, vsi->idx);
- if (status) {
- dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
- ice_stat_str(status), vsi->idx,
- ice_vsi_type_str(type));
- return -EIO;
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
+ return err;
}
/* Re-map HW VSI number, using VSI handle that has been
@@ -6626,7 +6587,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status ret;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6634,10 +6594,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
- ret = ice_init_all_ctrlq(hw);
- if (ret) {
- dev_err(dev, "control queues init failed %s\n",
- ice_stat_str(ret));
+ err = ice_init_all_ctrlq(hw);
+ if (err) {
+ dev_err(dev, "control queues init failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6651,10 +6610,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_load_pkg(NULL, pf);
}
- ret = ice_clear_pf_cfg(hw);
- if (ret) {
- dev_err(dev, "clear PF configuration failed %s\n",
- ice_stat_str(ret));
+ err = ice_clear_pf_cfg(hw);
+ if (err) {
+ dev_err(dev, "clear PF configuration failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6666,21 +6624,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_clear_pxe_mode(hw);
- ret = ice_init_nvm(hw);
- if (ret) {
- dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
+ err = ice_init_nvm(hw);
+ if (err) {
+ dev_err(dev, "ice_init_nvm failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_get_caps(hw);
- if (ret) {
- dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ err = ice_get_caps(hw);
+ if (err) {
+ dev_err(dev, "ice_get_caps failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
- if (ret) {
- dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
+ err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (err) {
+ dev_err(dev, "set_mac_cfg failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6763,10 +6721,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
- ret = ice_send_version(pf);
- if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
- ice_stat_str(ret));
+ err = ice_send_version(pf);
+ if (err) {
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
+ err);
goto err_vsi_rebuild;
}
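The next hunk drops ice_stat_str() outright: once every path returns a plain errno, a hand-maintained name table adds nothing over printing the value with %d. Where a symbolic name is still wanted, the kernel's printf already understands %pe for error pointers; a fragment (an aside, not something this series does) of what a converted call site could print instead:

	int err = ice_init_nvm(hw);	/* any helper returning 0 or -errno */

	if (err)
		dev_err(dev, "ice_init_nvm failed: %pe\n", ERR_PTR(err));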
@@ -6947,78 +6905,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err)
}
/**
- * ice_stat_str - convert status err code to a string
- * @stat_err: the status error code to convert
- */
-const char *ice_stat_str(enum ice_status stat_err)
-{
- switch (stat_err) {
- case ICE_SUCCESS:
- return "OK";
- case ICE_ERR_PARAM:
- return "ICE_ERR_PARAM";
- case ICE_ERR_NOT_IMPL:
- return "ICE_ERR_NOT_IMPL";
- case ICE_ERR_NOT_READY:
- return "ICE_ERR_NOT_READY";
- case ICE_ERR_NOT_SUPPORTED:
- return "ICE_ERR_NOT_SUPPORTED";
- case ICE_ERR_BAD_PTR:
- return "ICE_ERR_BAD_PTR";
- case ICE_ERR_INVAL_SIZE:
- return "ICE_ERR_INVAL_SIZE";
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- return "ICE_ERR_DEVICE_NOT_SUPPORTED";
- case ICE_ERR_RESET_FAILED:
- return "ICE_ERR_RESET_FAILED";
- case ICE_ERR_FW_API_VER:
- return "ICE_ERR_FW_API_VER";
- case ICE_ERR_NO_MEMORY:
- return "ICE_ERR_NO_MEMORY";
- case ICE_ERR_CFG:
- return "ICE_ERR_CFG";
- case ICE_ERR_OUT_OF_RANGE:
- return "ICE_ERR_OUT_OF_RANGE";
- case ICE_ERR_ALREADY_EXISTS:
- return "ICE_ERR_ALREADY_EXISTS";
- case ICE_ERR_NVM:
- return "ICE_ERR_NVM";
- case ICE_ERR_NVM_CHECKSUM:
- return "ICE_ERR_NVM_CHECKSUM";
- case ICE_ERR_BUF_TOO_SHORT:
- return "ICE_ERR_BUF_TOO_SHORT";
- case ICE_ERR_NVM_BLANK_MODE:
- return "ICE_ERR_NVM_BLANK_MODE";
- case ICE_ERR_IN_USE:
- return "ICE_ERR_IN_USE";
- case ICE_ERR_MAX_LIMIT:
- return "ICE_ERR_MAX_LIMIT";
- case ICE_ERR_RESET_ONGOING:
- return "ICE_ERR_RESET_ONGOING";
- case ICE_ERR_HW_TABLE:
- return "ICE_ERR_HW_TABLE";
- case ICE_ERR_DOES_NOT_EXIST:
- return "ICE_ERR_DOES_NOT_EXIST";
- case ICE_ERR_FW_DDP_MISMATCH:
- return "ICE_ERR_FW_DDP_MISMATCH";
- case ICE_ERR_AQ_ERROR:
- return "ICE_ERR_AQ_ERROR";
- case ICE_ERR_AQ_TIMEOUT:
- return "ICE_ERR_AQ_TIMEOUT";
- case ICE_ERR_AQ_FULL:
- return "ICE_ERR_AQ_FULL";
- case ICE_ERR_AQ_NO_WORK:
- return "ICE_ERR_AQ_NO_WORK";
- case ICE_ERR_AQ_EMPTY:
- return "ICE_ERR_AQ_EMPTY";
- case ICE_ERR_AQ_FW_CRITICAL:
- return "ICE_ERR_AQ_FW_CRITICAL";
- }
-
- return "ICE_ERR_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7030,7 +6916,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7041,14 +6927,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_set_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7061,20 +6944,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7089,7 +6969,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7100,14 +6980,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_get_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7120,20 +6997,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7174,8 +7048,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
vsi_props = &vsi->info;
@@ -7193,12 +7066,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
- bmode, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
+ bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for book keeping */
@@ -7230,7 +7101,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
struct ice_pf *pf = np->vsi->back;
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct ice_sw *pf_sw;
int rem, v, err = 0;
@@ -7264,14 +7134,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/* Update the unicast switch filter rules for the corresponding
* switch of the netdev
*/
- status = ice_update_sw_rule_bridge_mode(hw);
- if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
- mode, ice_stat_str(status),
+ err = ice_update_sw_rule_bridge_mode(hw);
+ if (err) {
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
+ mode, err,
ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
- return -EIO;
+ return err;
}
pf_sw->bridge_mode = mode;
@@ -8446,7 +8316,6 @@ int ice_open_internal(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
- enum ice_status status;
int err;
if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
@@ -8457,11 +8326,10 @@ int ice_open_internal(struct net_device *netdev)
netif_carrier_off(netdev);
pi = vsi->port_info;
- status = ice_update_link_info(pi);
- if (status) {
- netdev_err(netdev, "Failed to get link info, error %s\n",
- ice_stat_str(status));
- return -EIO;
+ err = ice_update_link_info(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to get link info, error %d\n", err);
+ return err;
}
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index fee37a5844cf..2d95fe533d1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -16,7 +16,7 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static enum ice_status
+static int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
@@ -27,7 +27,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
@@ -60,21 +60,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
- enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
+ int status;
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
do {
@@ -119,7 +119,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -131,7 +131,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
@@ -158,8 +158,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*
* Erase the NVM sector using the admin queue commands (0x0702)
*/
-enum ice_status
-ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
@@ -184,12 +183,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
*
* Reads one 16-bit word from the Shadow RAM using ice_read_flat_nvm().
*/
-static enum ice_status
-ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
- enum ice_status status;
__le16 data_local;
+ int status;
/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
* Shadow RAM sector restrictions necessary when reading from the NVM.
@@ -210,8 +208,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* This function will request NVM ownership.
*/
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->flash.blank_nvm_mode)
return 0;
@@ -318,18 +315,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization.
*/
-static enum ice_status
+static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length)
{
- enum ice_status status;
+ int status;
u32 start;
start = ice_get_flash_bank_offset(hw, bank, module);
if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module);
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
status = ice_acquire_nvm(hw, ICE_RES_READ);
@@ -353,11 +350,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -377,7 +374,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
@@ -392,11 +389,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
*
* Read a word from the specified netlist bank.
*/
-static enum ice_status
+static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -414,9 +411,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
*
* Reads one 16-bit word from the Shadow RAM using ice_read_sr_word_aq().
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -438,13 +435,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- enum ice_status status;
u16 pfa_len, pfa_ptr;
u16 next_tlv;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) {
@@ -482,7 +479,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len;
return 0;
}
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
@@ -490,7 +487,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
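The scan above treats the Preserved Fields Area as a flat run of words: a type word and a length word, then `length` data words, repeated until the wanted module is found or the area ends. A standalone sketch of the walk over hypothetical demo data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* type, length, data..., type, length, data... (made-up contents) */
	const uint16_t pfa[] = { 0x48, 1, 0xaaaa, 0x16, 2, 0xbbbb, 0xcccc };
	const size_t pfa_len = sizeof(pfa) / sizeof(pfa[0]);
	const uint16_t wanted = 0x16;
	size_t next = 0;

	while (next + 2 <= pfa_len) {
		uint16_t type = pfa[next];
		uint16_t len = pfa[next + 1];

		if (type == wanted) {
			printf("found TLV 0x%x: %u data words at index %zu\n",
			       (unsigned)type, (unsigned)len, next + 2);
			return 0;
		}
		next += len + 2;	/* skip data plus type/length words */
	}
	return 1;	/* module does not exist -> -ENOENT in the driver */
}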
/**
@@ -501,12 +498,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*
* Reads the part number string from the NVM.
*/
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
- enum ice_status status;
u16 pba_word, pba_size;
+ int status;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
@@ -525,7 +521,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Subtract one to get PBA word count (PBA Size word is included in
@@ -534,7 +530,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
for (i = 0; i < pba_size; i++) {
@@ -561,11 +557,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the NVM info structure.
*/
-static enum ice_status
+static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
u16 eetrack_lo, eetrack_hi, ver;
- enum ice_status status;
+ int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
@@ -601,7 +597,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated.
*/
-enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}
@@ -615,12 +611,12 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*/
-static enum ice_status
+static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
struct ice_orom_civd_info tmp;
- enum ice_status status;
+ int status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
@@ -650,14 +646,14 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
- return ICE_ERR_NVM;
+ return -EIO;
}
*civd = tmp;
return 0;
}
- return ICE_ERR_NVM;
+ return -EIO;
}
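The failure path above relies on the CIVD section being self-checking: all of its bytes, the stored checksum byte included, must sum to zero modulo 256. A standalone sketch of that zero-sum validation over made-up data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t blob[8] = { 0x24, 0x00, 0x01, 0x05, 0x00, 0x00, 0x00, 0x00 };
	uint8_t sum = 0;
	size_t i;

	/* choose the trailing checksum byte so the total wraps to zero */
	for (i = 0; i < sizeof(blob) - 1; i++)
		sum += blob[i];
	blob[sizeof(blob) - 1] = (uint8_t)(0x100 - sum);

	/* validation pass: sum every byte, checksum included */
	for (sum = 0, i = 0; i < sizeof(blob); i++)
		sum += blob[i];
	printf("%s\n", sum ? "invalid checksum" : "checksum OK");
	return 0;
}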
/**
@@ -669,12 +665,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* Read Option ROM version and security revision from the Option ROM flash
* section.
*/
-static enum ice_status
+static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
struct ice_orom_civd_info civd;
- enum ice_status status;
u32 combo_ver;
+ int status;
status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
@@ -700,7 +696,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has
* not yet been activated.
*/
-enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}
@@ -715,13 +711,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure.
*/
-static enum ice_status
+static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist)
{
u16 module_id, length, node_count, i;
- enum ice_status status;
u16 *id_blk;
+ int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status)
@@ -730,7 +726,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
@@ -741,7 +737,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
@@ -751,7 +747,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
if (!id_blk)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
@@ -791,7 +787,7 @@ exit_error:
* extract version data of a pending flash update in order to display the
* version data.
*/
-enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
@@ -804,10 +800,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
-static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+static int ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -819,7 +815,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
- if (status == ICE_ERR_AQ_ERROR &&
+ if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
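ice_discover_flash_size(), continued in the next hunk, narrows the usable flash size by bisection: a probe read that fails with EINVAL lowers the upper bound, a successful read raises the lower bound, until the two meet. A standalone sketch of the search, with read_ok() standing in for the one-byte ice_read_flat_nvm() probe:

#include <stdio.h>

static const unsigned long real_size = 0x9a000;	/* hidden from the search */

static int read_ok(unsigned long offset)
{
	return offset < real_size;	/* stand-in for the flat NVM probe */
}

int main(void)
{
	unsigned long min_size = 0, max_size = 0x1000000;	/* 16 MiB cap */

	while ((max_size - min_size) > 1) {
		unsigned long offset = (max_size + min_size) / 2;

		if (!read_ok(offset))
			max_size = offset;	/* new upper bound */
		else
			min_size = offset;	/* new lower bound */
	}
	printf("discovered flash size: 0x%lx\n", max_size);
	return 0;
}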
@@ -859,10 +855,9 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -891,10 +886,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -917,12 +911,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read
* from the active module.
*/
-static enum ice_status
-ice_determine_active_flash_banks(struct ice_hw *hw)
+static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
struct ice_bank_info *banks = &hw->flash.banks;
- enum ice_status status;
u16 ctrl_word;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) {
@@ -933,7 +926,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
@@ -997,12 +990,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
-enum ice_status ice_init_nvm(struct ice_hw *hw)
+int ice_init_nvm(struct ice_hw *hw)
{
struct ice_flash_info *flash = &hw->flash;
- enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
+ int status;
/* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
@@ -1021,7 +1014,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */
flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
- return ICE_ERR_NVM_BLANK_MODE;
+ return -EIO;
}
status = ice_discover_flash_size(hw);
@@ -1060,11 +1053,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
*
* Verify NVM PFA checksum validity (0x0706)
*/
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1080,7 +1073,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
- status = ICE_ERR_NVM_CHECKSUM;
+ status = -EIO;
return status;
}
@@ -1093,7 +1086,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
*/
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
@@ -1113,7 +1106,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
* Update empr (0x0709). This command allows SW to
* request an EMPR to activate new FW.
*/
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -1136,7 +1129,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
* as part of the NVM update as the first cmd in the flow.
*/
-enum ice_status
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
@@ -1144,7 +1137,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc;
if (length != 0 && !data)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pkg_data;
@@ -1173,17 +1166,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
* the TransferFlag is set to End or StartAndEnd.
*/
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || !comp_response || !comp_response_code)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pass_comp_tbl;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index c6f05f43d593..f1d754ffac16 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -12,38 +12,34 @@ struct ice_orom_civd_info {
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
} __packed;
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
-enum ice_status
-ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
-enum ice_status
-ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
-enum ice_status
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
+int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+int ice_init_nvm(struct ice_hw *hw);
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
-enum ice_status
+int ice_nvm_validate_checksum(struct ice_hw *hw);
+int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
+int ice_aq_nvm_update_empr(struct ice_hw *hw);
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index bf7247c6f58e..dfc7c830acf6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1205,10 +1205,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
ice_set_tx_tstamp(pf, false);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ce3c7bded4cb..7947223536e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -11,7 +11,7 @@
* This function inserts the root node of the scheduling tree topology
* into the SW DB.
*/
-static enum ice_status
+static int
ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_aqc_txsched_elem_data *info)
{
@@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_hw *hw;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
if (!root)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&root->info, info, sizeof(*info));
@@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
*
* This function sends a scheduling elements cmd (cmd_opc)
*/
-static enum ice_status
+static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 elems_req, void *buf, u16 buf_size,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -145,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* This function inserts a scheduler node to the SW DB.
*/
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_sched_node *node;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!parent) {
ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
le32_to_cpu(info->parent_teid));
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* query the current node information from FW before adding it
@@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (hw->max_children[layer]) {
/* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
@@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
}
@@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
*
* Delete scheduling elements (0x040F)
*/
-static enum ice_status
+static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
@@ -228,19 +228,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* This function removes nodes from HW
*/
-static enum ice_status
+static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
- enum ice_status status;
u16 buf_size;
+ int status;
buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
*
* Get default scheduler topology (0x0400)
*/
-static enum ice_status
+static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
struct ice_aqc_get_topo_elem *buf, u16 buf_size,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
@@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
*
* Add scheduling elements (0x0401)
*/
-static enum ice_status
+static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
@@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Configure scheduling elements (0x0403)
*/
-static enum ice_status
+static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
@@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static enum ice_status
+static int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -462,7 +462,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Suspend scheduling elements (0x0409)
*/
-static enum ice_status
+static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Resume scheduling elements (0x040A)
*/
-static enum ice_status
+static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Query scheduler resource allocation (0x0412)
*/
-static enum ice_status
+static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
@@ -520,18 +520,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static enum ice_status
+static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
u16 i, buf_size, num_elem_ret = 0;
- enum ice_status status;
__le32 *buf;
+ int status;
buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_nodes; i++)
buf[i] = cpu_to_le32(node_teids[i]);
@@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
@@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate RDMA queue contexts */
if (!vsi_ctx->rdma_q_ctx[tc]) {
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->rdma_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
return 0;
}
@@ -629,7 +629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
@@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*
* RL profile function to add, query, or remove profile(s)
*/
-static enum ice_status
+static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.rl_profile;
@@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*
* Add RL profile (0x0410)
*/
-static enum ice_status
+static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
@@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Remove RL profile (0x0415)
*/
-static enum ice_status
+static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
@@ -721,24 +721,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
* its associated parameters from HW DB, and locally. The caller needs to
* hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
- enum ice_status status;
u16 num_profiles = 1;
+ int status;
if (rl_info->prof_id_ref != 0)
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* Safe to remove profile ID */
buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
- return ICE_ERR_CFG;
+ return -EIO;
/* Delete stale entry now */
list_del(&rl_info->list_entry);
@@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln], list_entry) {
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
@@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
*
* This function adds nodes to HW as well as to the SW DB for a given layer
*/
-static enum ice_status
+static int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid)
@@ -883,15 +883,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
u16 i, num_groups_added = 0;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
size_t buf_size;
+ int status = 0;
u32 teid;
buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -918,7 +918,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
- return ICE_ERR_CFG;
+ return -EIO;
}
*num_nodes_added = num_nodes;
@@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*
* Add nodes into specific HW layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* max children per node per layer */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
- return ICE_ERR_CFG;
- return ICE_ERR_MAX_LIMIT;
+ return -EIO;
+ return -ENOSPC;
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
@@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function adds nodes to a given layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
- enum ice_status status = 0;
+ int status = 0;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
@@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added);
- status = ICE_ERR_CFG;
+ status = -EIO;
break;
}
/* break if all the nodes are added successfully */
if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
- if (status && status != ICE_ERR_MAX_LIMIT)
+ if (status && status != -ENOSPC)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
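Only -ENOSPC is treated as retryable in this loop: it signals that the chosen parent has no child slots left, so a sibling parent is tried with the remaining nodes, while any other error aborts the whole operation. A standalone sketch of that retry policy, with a made-up try_add():

#include <errno.h>
#include <stdio.h>

static int try_add(int parent)
{
	return parent == 0 ? -ENOSPC : 0;	/* first parent is full */
}

int main(void)
{
	int parent = 0, status;

	for (;;) {
		status = try_add(parent);
		if (!status)
			break;			/* nodes added successfully */
		if (status != -ENOSPC)
			break;			/* real failure: give up */
		parent++;			/* parent full: try a sibling */
	}
	printf("status %d using parent %d\n", status, parent);
	return 0;
}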
@@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
}
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
+ int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
@@ -1198,23 +1198,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
* resources, default topology created by firmware and storing the information
* in SW DB.
*/
-enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+int ice_sched_init_port(struct ice_port_info *pi)
{
struct ice_aqc_get_topo_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 num_branches;
u16 num_elems;
+ int status;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Query the Default Topology from FW */
buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Query default scheduling tree topology */
status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
@@ -1226,7 +1226,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
num_branches);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1237,7 +1237,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
num_elems);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1300,11 +1300,11 @@ err_init_port:
*
* query FW for allocated scheduler resources and store in HW struct
*/
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+int ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
- enum ice_status status = 0;
__le16 max_sibl;
+ int status = 0;
u16 i;
if (hw->layer_info)
@@ -1312,7 +1312,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
if (status)
@@ -1341,7 +1341,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
sizeof(*hw->layer_info)),
GFP_KERNEL);
if (!hw->layer_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto sched_query_out;
}
@@ -1614,31 +1614,31 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* This function adds the VSI child nodes to tree. It gets called for
* LAN and RDMA separately.
*/
-static enum ice_status
+static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
+ int status;
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1717,18 +1717,18 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
* This function adds the VSI supported nodes into the Tx tree, including the
* VSI, its parent, and intermediate nodes in the layers below
*/
-static enum ice_status
+static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
@@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (i == vsil)
parent->vsi_handle = vsi_handle;
@@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
*
* This function adds a new VSI into scheduler tree
*/
-static enum ice_status
+static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
@@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
@@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
*
* This function updates the VSI child nodes based on the number of queues
*/
-static enum ice_status
+static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
@@ -1802,21 +1802,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
+ int status = 0;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
@@ -1869,22 +1869,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* enabled and the VSI is in a suspended state, then resume the VSI. If the TC
* is disabled, then suspend the VSI if it is not suspended already.
*/
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */
@@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
@@ -1993,11 +1993,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
* This function removes the VSI and its LAN or RDMA children nodes from the
* scheduler tree.
*/
-static enum ice_status
+static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
+ int status = -EINVAL;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
@@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = ICE_ERR_IN_USE;
+ status = -EBUSY;
goto exit_sched_rm_vsi_cfg;
}
while (j < vsi_node->num_children) {
@@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg:
* This function clears the VSI and its LAN child nodes from the scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
* This function clears the VSI and its RDMA child nodes from the scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
@@ -2188,36 +2188,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function moves the child nodes to a given parent.
*/
-static enum ice_status
+static int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
- enum ice_status status = 0;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ int status = 0;
u16 buf_len;
hw = pi->hw;
if (!parent || !num_items)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Does parent have enough space? */
if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer])
- return ICE_ERR_AQ_FULL;
+ return -ENOSPC;
buf_len = struct_size(buf, teid, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto move_err_exit;
}
@@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto move_err_exit;
}
@@ -2251,28 +2251,28 @@ move_err_exit:
* This function moves a VSI to an aggregator node or its subtree.
* Intermediate nodes may be created if required.
*/
-static enum ice_status
+static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
u8 tc)
{
struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u32 first_node_teid, vsi_teid;
- enum ice_status status;
u16 num_nodes_added;
u8 aggl, vsil, i;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
@@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
}
move_nodes:
@@ -2333,14 +2333,14 @@ move_nodes:
* aggregator VSI info based on the passed-in boolean parameter rm_vsi_info. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
struct ice_sched_agg_info *agg_info, u8 tc,
bool rm_vsi_info)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *tmp;
- enum ice_status status = 0;
+ int status = 0;
list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
list_entry) {
@@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
* This function removes the aggregator node and any intermediate nodes
* from the given TC
*/
-static enum ice_status
+static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *tc_node, *agg_node;
@@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Can't remove the aggregator node if it has children */
if (ice_sched_is_agg_inuse(pi, agg_node))
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* need to remove the whole subtree if aggregator node is the
* only child.
@@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
struct ice_sched_node *parent = agg_node->parent;
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (parent->num_children > 1)
break;
@@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* the aggregator configuration completely for the requested TC. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
u8 tc, bool rm_vsi_info)
{
- enum ice_status status = 0;
+ int status = 0;
/* If nothing to remove - return success */
if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
@@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc:
* Save aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
unsigned long *tc_bitmap)
{
@@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2501,20 +2501,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
* This function creates an aggregator node and intermediate nodes if required
* for the given TC
*/
-static enum ice_status
+static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *parent, *agg_node, *tc_node;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u32 first_node_teid;
u16 num_nodes_added;
+ int status = 0;
u8 i, aggl;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
/* Does Agg node already exist? */
@@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
parent = tc_node;
for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2591,13 +2591,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* resources and remove aggregator ID.
* This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, unsigned long *tc_bitmap)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
agg_info = ice_get_agg_info(hw, agg_id);
@@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
GFP_KERNEL);
if (!agg_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
agg_info->agg_id = agg_id;
agg_info->agg_type = agg_type;
@@ -2653,19 +2653,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
*
* This function configures aggregator node(s).
*/
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
- status = ice_sched_cfg_agg(pi, agg_id, agg_type,
- (unsigned long *)&bitmap);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
if (!status)
- status = ice_save_agg_tc_bitmap(pi, agg_id,
- (unsigned long *)&bitmap);
+ status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
mutex_unlock(&pi->sched_lock);
return status;
}
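The casts dropped in the hunk above were only ever needed because tc_bitmap arrives as a u8; once it is widened into a local unsigned long, its address already has the type the kernel bitmap helpers expect. A minimal sketch of that pattern follows, with a hypothetical helper name (the driver's own ice_is_tc_ena() serves a similar purpose):

/* Hypothetical illustration of the widening pattern used by ice_cfg_agg():
 * promote a u8 TC mask to unsigned long so its address can be handed to
 * the bitmap API without an (unsigned long *) cast.
 */
#include <linux/bitops.h>

static bool example_tc_is_ena(u8 tc_bitmap, u8 tc)
{
	unsigned long bitmap = tc_bitmap;	/* widen once, up front */

	return test_bit(tc, &bitmap);		/* no cast required */
}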
@@ -2724,7 +2722,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
* Save VSI to aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
unsigned long *tc_bitmap)
{
@@ -2733,11 +2731,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* check if entry already exists */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2754,21 +2752,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
* already associated with the aggregator node, then no operation is performed on
* the tree. This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* If the VSI is already part of another aggregator then update
* its VSI info list
*/
@@ -2790,7 +2788,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*agg_vsi_info), GFP_KERNEL);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* add VSI ID into the aggregator list */
agg_vsi_info->vsi_handle = vsi_handle;
@@ -2851,14 +2849,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
+ int status;
buf = *info;
/* Parent TEID is reserved field in this aq call */
@@ -2874,7 +2872,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
&elem_cfgd, NULL);
if (status || elem_cfgd != num_elems) {
ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Config success case */
@@ -2893,7 +2891,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*
* This function configures node element's BW allocation.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -2909,7 +2907,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
} else {
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -2925,12 +2923,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Move or associate VSI to a new or default aggregator node.
*/
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
@@ -3098,12 +3096,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
*
* This function converts the BW to profile structure format.
*/
-static enum ice_status
+static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile)
{
- enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
+ int status = -EINVAL;
bool found = false;
s32 encode = 0;
s64 mv = 0;
@@ -3150,7 +3148,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_encode = cpu_to_le16(encode);
status = 0;
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
return status;
@@ -3176,9 +3174,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
+ int status;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return NULL;
@@ -3249,7 +3247,7 @@ exit_add_rl_prof:
*
* This function configures node element's BW limit.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 rl_prof_id)
{
@@ -3268,7 +3266,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
* hence only one of them may be set for any given element
*/
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
- return ICE_ERR_CFG;
+ return -EIO;
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
break;
@@ -3291,7 +3289,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
(le16_to_cpu(data->eir_bw.bw_profile_idx) !=
ICE_SCHED_DFLT_RL_PROF_ID))
- return ICE_ERR_CFG;
+ return -EIO;
/* EIR BW is set to default, disable it */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
/* Okay to enable shared BW now */
@@ -3300,7 +3298,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
break;
default:
/* Unknown rate limit type */
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -3420,15 +3418,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
* scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
- enum ice_status status = 0;
+ int status = 0;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3441,11 +3439,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
- if (status && status != ICE_ERR_IN_USE)
+ if (status && status != -EBUSY)
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
- if (status == ICE_ERR_IN_USE)
+ if (status == -EBUSY)
status = 0;
return status;
}
@@ -3461,16 +3459,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 layer_num)
{
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
u16 rl_prof_id;
u16 old_id;
+ int status;
hw = pi->hw;
switch (rl_type) {
@@ -3488,7 +3486,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Save existing RL prof ID for later clean up */
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
@@ -3518,7 +3516,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* them may be set for any given element. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
struct ice_sched_node *node,
u8 layer_num, enum ice_rl_type rl_type, u32 bw)
@@ -3562,14 +3560,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_info;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
+ int status = -EINVAL;
rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
if (!rl_prof_info)
@@ -3608,31 +3606,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
* EIR, or SRL. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
struct ice_sched_node *cfg_node = node;
- enum ice_status status;
+ int status;
struct ice_hw *hw;
u8 layer_num;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rl_type == ICE_SHARED_BW) {
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(node, layer_num);
if (!cfg_node)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
@@ -3657,7 +3655,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type)
@@ -3675,7 +3673,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
* behalf of the requested node (first argument). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
/* SRL profiles are not available on all layers. Check if the
@@ -3690,7 +3688,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
(node->parent && node->parent->num_children == 1)))
return 0;
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -3701,7 +3699,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
*
* Save BW information of queue type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
switch (rl_type) {
@@ -3715,7 +3713,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -3731,16 +3729,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
*
* This function sets BW limit of queue scheduling node.
*/
-static enum ice_status
+static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
struct ice_q_ctx *q_ctx;
+ int status = -EINVAL;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
if (!q_ctx)
@@ -3762,7 +3760,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (sel_layer >= pi->hw->num_tx_sched_layers) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit_q_bw_lmt;
}
status = ice_sched_validate_srl_node(node, sel_layer);
@@ -3794,7 +3792,7 @@ exit_q_bw_lmt:
*
* This function configures BW limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
@@ -3812,7 +3810,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* This function configures BW default limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type)
{
@@ -3880,13 +3878,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from the passed-in argument bw.
*/
-static enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
+ int status = -EINVAL;
if (!pi)
return status;
@@ -3921,7 +3919,7 @@ exit_set_node_bw_lmt_per_tc:
* This function configures BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3948,7 +3946,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function configures default BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
@@ -3976,13 +3974,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* burst size value is used for future rate limit calls. It doesn't change the
* existing or previously created RL profiles.
*/
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
u16 burst_size_to_prog;
if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
bytes > ICE_MAX_BURST_SIZE_ALLOWED)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (ice_round_to_num(bytes, 64) <=
ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
/* 64 byte granularity case */
@@ -4017,13 +4015,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
* This function configures node element's priority value. It
* needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -4044,12 +4042,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
* This function restores node's BW from bw_t_info. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status = ICE_ERR_PARAM;
+ int status = -EINVAL;
u16 bw_alloc;
if (!node)
@@ -4137,7 +4135,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
ICE_MAX_TRAFFIC_CLASS)) {
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
ice_sched_get_ena_tc_bitmap(pi,
@@ -4191,18 +4189,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
* their node bandwidth information. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
-ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_port_info *pi = hw->port_info;
struct ice_sched_agg_info *agg_info;
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info)
return 0; /* Not present in list - default Agg case */
@@ -4233,10 +4230,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_replay_vsi_agg(hw, vsi_handle);
@@ -4252,14 +4249,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays queue type node bandwidth. This function needs to be
* called with scheduler lock held.
*/
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
struct ice_sched_node *q_node;
/* Following also checks the presence of node in tree */
q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
if (!q_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
}
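Throughout ice_sched.c the conversion is mechanical: the local status becomes an int, early exits return a negative errno, and 0 still means success, so results propagate through the stock kernel idiom. A sketch under those assumptions (the wrapper below is invented; ice_rm_vsi_lan_cfg() and ice_rm_vsi_rdma_cfg() are the converted APIs):

/* Hypothetical caller showing the post-conversion convention:
 * 0 on success, negative errno on failure, propagated as-is.
 */
static int example_teardown_vsi(struct ice_port_info *pi, u16 vsi_handle)
{
	int status;

	if (!pi)
		return -EINVAL;		/* was ICE_ERR_PARAM */

	status = ice_rm_vsi_lan_cfg(pi, vsi_handle);
	if (status)
		return status;		/* already a negative errno */

	return ice_rm_vsi_rdma_cfg(pi, vsi_handle);
}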
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 6bddcbecaf5e..4f91577fed56 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -65,12 +65,12 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
-enum ice_status ice_sched_init_port(struct ice_port_info *pi);
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+int ice_sched_init_port(struct ice_port_info *pi);
+int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
@@ -79,7 +79,7 @@ void ice_sched_clear_agg(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
@@ -87,35 +87,38 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
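With the prototypes above returning int, consumers no longer need a driver-private decode step to report failures; the standard errno machinery applies. A hedged example (the caller, device pointer, and message are invented):

/* Hypothetical caller: a negative errno prints meaningfully with %d, or
 * symbolically with %pe via ERR_PTR(), with no ice_status translation.
 */
#include <linux/device.h>
#include <linux/err.h>

static int example_set_burst(struct ice_hw *hw, struct device *dev)
{
	int err = ice_cfg_rl_burst_size(hw, 4096);

	if (err)
		dev_err(dev, "failed to set burst size: %pe\n", ERR_PTR(err));

	return err;
}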
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index aa11d07793d4..52c6bac41bf7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -18,7 +18,7 @@
* queue and asynchronously sending message via
* ice_sq_send_cmd() function
*/
-enum ice_status
+int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
@@ -228,7 +228,7 @@ ice_mbx_traverse(struct ice_hw *hw,
* sent per VF and marks the VF as malicious if it exceeds
* the permissible number of messages to send.
*/
-static enum ice_status
+static int
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
enum ice_mbx_snapshot_state *new_state,
bool *is_malvf)
@@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
if (vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* increment the message count in the VF array */
snap->mbx_vf.vf_cntr[vf_id]++;
@@ -297,7 +297,7 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
* Detect: If the pending message count exceeds the watermark, traverse
* the static snapshot and look for a malicious VF.
*/
-enum ice_status
+int
ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_data *mbx_data, u16 vf_id,
bool *is_malvf)
@@ -306,10 +306,10 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_snap_buffer_data *snap_buf;
struct ice_ctl_q_info *cq = &hw->mailboxq;
enum ice_mbx_snapshot_state new_state;
- enum ice_status status = 0;
+ int status = 0;
if (!is_malvf || !mbx_data)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
/* When entering the mailbox state machine assume that the VF
* is not malicious until detected.
@@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* interrupt is not less than the defined AVF message threshold.
*/
if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
/* The watermark value should not be less than the threshold limit
* set for the number of asynchronous messages a VF can send to mailbox
@@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
*/
if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
snap_buf = &snap->mbx_buf;
@@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
default:
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = ICE_ERR_CFG;
+ status = -EIO;
}
snap_buf->state = new_state;
@@ -405,20 +405,20 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* the input vf_id against the bitmap to verify if the VF has been
* detected in any previous mailbox iterations.
*/
-enum ice_status
+int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
if (!all_malvfs || !report_malvf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*report_malvf = false;
if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
if (vf_id >= bitmap_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* If the vf_id bit is not already set in the bitmap, set it and report the VF */
if (!test_and_set_bit(vf_id, all_malvfs))
@@ -441,19 +441,19 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
* that the new VF loaded is not considered malicious before going
* through the overflow detection algorithm.
*/
-enum ice_status
+int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id)
{
if (!snap || !all_malvfs)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (bitmap_len < snap->mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
/* Ensure VF ID value is not larger than bitmap or VF counter length */
if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
clear_bit(vf_id, all_malvfs);
@@ -482,7 +482,7 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
* called to ensure that the vf_count can be compared against the number
* of VFs supported as defined in the functional capabilities of the device.
*/
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
@@ -491,13 +491,13 @@ enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
* the functional capabilities of the PF.
*/
if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
sizeof(*snap->mbx_vf.vf_cntr),
GFP_KERNEL);
if (!snap->mbx_vf.vf_cntr)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Setting the VF counter length to the number of allocated
* VFs for the given PF's functional capabilities.
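The bounds enforced by ice_mbx_vf_state_handler() above amount to a single precondition: the mailbox must hold more messages than the per-VF async threshold, and the watermark must sit between that threshold and the mailbox capacity. A consolidated sketch, assuming a hypothetical helper name:

/* Hypothetical restatement of the state handler's input validation
 * (ICE_ASYNC_VF_MSG_THRESHOLD and struct ice_mbx_data are the driver's).
 */
static int example_check_mbx_bounds(const struct ice_mbx_data *mbx_data)
{
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return -EINVAL;

	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return -EINVAL;

	return 0;
}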
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 161dc55d9e9c..68686a3fd7e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -14,24 +14,24 @@
#define ICE_ASYNC_VF_MSG_THRESHOLD 63
#ifdef CONFIG_PCI_IOV
-enum ice_status
+int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
-enum ice_status
+int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
u16 vf_id, bool *is_mal_vf);
-enum ice_status
+int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id);
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
void ice_mbx_deinit_snapshot(struct ice_hw *hw);
-enum ice_status
+int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf);
#else /* CONFIG_PCI_IOV */
-static inline enum ice_status
+static inline int
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
u16 __always_unused vfid, u32 __always_unused v_opcode,
u32 __always_unused v_retval, u8 __always_unused *msg,
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
deleted file mode 100644
index dbf66057371d..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_STATUS_H_
-#define _ICE_STATUS_H_
-
-/* Error Codes */
-enum ice_status {
- ICE_SUCCESS = 0,
-
- /* Generic codes : Range -1..-49 */
- ICE_ERR_PARAM = -1,
- ICE_ERR_NOT_IMPL = -2,
- ICE_ERR_NOT_READY = -3,
- ICE_ERR_NOT_SUPPORTED = -4,
- ICE_ERR_BAD_PTR = -5,
- ICE_ERR_INVAL_SIZE = -6,
- ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
- ICE_ERR_RESET_FAILED = -9,
- ICE_ERR_FW_API_VER = -10,
- ICE_ERR_NO_MEMORY = -11,
- ICE_ERR_CFG = -12,
- ICE_ERR_OUT_OF_RANGE = -13,
- ICE_ERR_ALREADY_EXISTS = -14,
- ICE_ERR_DOES_NOT_EXIST = -15,
- ICE_ERR_IN_USE = -16,
- ICE_ERR_MAX_LIMIT = -17,
- ICE_ERR_RESET_ONGOING = -18,
- ICE_ERR_HW_TABLE = -19,
- ICE_ERR_FW_DDP_MISMATCH = -20,
-
- ICE_ERR_NVM = -50,
- ICE_ERR_NVM_CHECKSUM = -51,
- ICE_ERR_BUF_TOO_SHORT = -52,
- ICE_ERR_NVM_BLANK_MODE = -53,
- ICE_ERR_AQ_ERROR = -100,
- ICE_ERR_AQ_TIMEOUT = -101,
- ICE_ERR_AQ_FULL = -102,
- ICE_ERR_AQ_NO_WORK = -103,
- ICE_ERR_AQ_EMPTY = -104,
- ICE_ERR_AQ_FW_CRITICAL = -105,
-};
-
-#endif /* _ICE_STATUS_H_ */
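Some of the codes deleted above had obvious errno counterparts (ICE_ERR_PARAM, ICE_ERR_NO_MEMORY), while the NVM and admin-queue ranges (-50 and -100 onward) did not, which is why the driver previously carried a translation helper at its boundaries (the call to ice_status_to_errno() removed in ice_switch.c below). A rough sketch of that kind of shim, using the substitutions this diff makes rather than the removed helper's exact table:

/* Rough sketch of the boundary shim this series makes unnecessary.
 * The mappings mirror the replacements visible in this diff; the real
 * ice_status_to_errno() may have differed in detail.
 */
static int example_status_to_errno(enum ice_status status)
{
	switch (status) {
	case ICE_SUCCESS:		return 0;
	case ICE_ERR_PARAM:		return -EINVAL;
	case ICE_ERR_NO_MEMORY:		return -ENOMEM;
	case ICE_ERR_DOES_NOT_EXIST:	return -ENOENT;
	case ICE_ERR_IN_USE:		return -EBUSY;
	case ICE_ERR_AQ_FULL:		return -ENOSPC;
	default:			return -EIO;	/* ICE_ERR_CFG et al. */
	}
}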
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 793f4a9fc2cd..e477c2b1d5bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -528,7 +528,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
+int ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -536,7 +536,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
sizeof(*recps), GFP_KERNEL);
if (!recps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
@@ -576,14 +576,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* in the response buffer. The caller of this function should use *num_elems while
* parsing the response buffer.
*/
-static enum ice_status
+static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -606,14 +606,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-static enum ice_status
+static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
res = &desc.params.add_update_free_vsi_res;
@@ -650,14 +650,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware (0x0213)
*/
-static enum ice_status
+static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -685,14 +685,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-static enum ice_status
+static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -832,15 +832,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
* If this function gets called after reset for existing VSIs, then update
* the corresponding VSI handle list entry with the new HW VSI number.
*/
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_vsi_ctx *tmp_vsi_ctx;
- enum ice_status status;
+ int status;
if (vsi_handle >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_aq_add_vsi(hw, vsi_ctx, cd);
if (status)
return status;
@@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
ice_aq_free_vsi(hw, vsi_ctx, false, cd);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
*tmp_vsi_ctx = *vsi_ctx;
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
@@ -873,14 +873,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware as well as from VSI handle list
*/
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
if (!status)
@@ -897,12 +897,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware
*/
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
@@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
- return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
+ return ice_update_vsi(hw, vsi_handle, ctx, NULL);
}
/**
@@ -939,20 +939,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
*
* allocates or frees a VSI list resource
*/
-static enum ice_status
+static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *vsi_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
}
@@ -998,17 +998,17 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
opc != ice_aqc_opc_remove_sw_rules)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
return status;
}
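Because a missing rule on a remove now comes back as -ENOENT instead of ICE_ERR_DOES_NOT_EXIST, callers that treat "already gone" as success can test one well-known constant. A hedged caller sketch (the wrapper is invented; ice_aq_sw_rules() is the converted API):

/* Hypothetical remove-path caller: with standard errnos, "rule already
 * removed" is simply -ENOENT and can be forgiven inline.
 */
static int example_remove_rule(struct ice_hw *hw, void *rule, u16 rule_sz)
{
	int err = ice_aq_sw_rules(hw, rule, rule_sz, 1,
				  ice_aqc_opc_remove_sw_rules, NULL);

	return err == -ENOENT ? 0 : err;
}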
@@ -1032,7 +1032,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
-static enum ice_status
+static int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1069,18 +1069,18 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
-static enum ice_status
+static int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 buf_size;
+ int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.add_get_recipe;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
@@ -1104,7 +1104,7 @@ ice_aq_get_recipe(struct ice_hw *hw,
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
-static enum ice_status
+static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -1130,13 +1130,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
-static enum ice_status
+static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.recipe_to_profile;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
@@ -1154,16 +1154,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
-static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = kzalloc(buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
@@ -1230,7 +1230,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
* bookkeeping so that we have a current list of all the recipes that are
* programmed in the firmware.
*/
-static enum ice_status
+static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
bool *refresh_required)
{
@@ -1238,16 +1238,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
struct ice_aqc_recipe_data_elem *tmp;
u16 num_recps = ICE_MAX_NUM_RECIPES;
struct ice_prot_lkup_ext *lkup_exts;
- enum ice_status status;
u8 fv_word_idx = 0;
u16 sub_recps;
+ int status;
bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
/* we need a buffer big enough to accommodate all the recipes */
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp[0].recipe_indx = rid;
status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
@@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
GFP_KERNEL);
if (!rg_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
GFP_KERNEL);
if (!recps[rid].root_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1407,19 +1407,19 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
- enum ice_status status;
u16 req_desc = 0;
u16 num_elems;
+ int status;
u16 i;
rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
GFP_KERNEL);
if (!rbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Multiple calls to ice_aq_get_sw_cfg may be required
* to get all the switch configuration information. The need
@@ -1670,7 +1670,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
*/
-static enum ice_status
+static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
@@ -1681,14 +1681,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
- enum ice_status status;
u16 lg_act_size;
u16 rules_size;
+ int status;
u32 act;
u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
@@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
@@ -1808,19 +1808,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list ID
*/
-static enum ice_status
+static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
u16 s_rule_size;
u16 rule_type;
+ int status;
int i;
if (!num_vsi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (lkup_type == ICE_SW_LKUP_MAC ||
lkup_type == ICE_SW_LKUP_MAC_VLAN ||
@@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
- return ICE_ERR_PARAM;
+ return -EINVAL;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_vsi; i++) {
if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
@@ -1869,11 +1869,11 @@ exit:
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
-static enum ice_status
+static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
- enum ice_status status;
+ int status;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1895,7 +1895,7 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* to the corresponding filter management list to track this switch rule
* and VSI mapping
*/
-static enum ice_status
+static int
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
@@ -1903,16 +1903,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_sw_lkup_type l_type;
struct ice_sw_recipe *recp;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_pkt_fwd_rule_exit;
}
@@ -1959,16 +1959,16 @@ ice_create_pkt_fwd_rule_exit:
* Call AQ command to update a previously created switch rule with a
* VSI list ID
*/
-static enum ice_status
+static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
@@ -1988,13 +1988,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
+ int status = 0;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@@ -2044,24 +2044,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_add_update_vsi_list(struct ice_hw *hw,
struct ice_fltr_mgmt_list_entry *m_entry,
struct ice_fltr_info *cur_fltr,
struct ice_fltr_info *new_fltr)
{
- enum ice_status status = 0;
u16 vsi_list_id = 0;
+ int status = 0;
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
vsi_handle_arr[1] = new_fltr->vsi_handle;
@@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
vsi_list_id);
if (!m_entry->vsi_list_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
@@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
enum ice_adminq_opc opcode;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -2209,7 +2209,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
*
* Adds or updates the rule lists for a given recipe
*/
-static enum ice_status
+static int
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
@@ -2217,10 +2217,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2255,18 +2255,18 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
* The VSI list should be emptied before this function is called to remove the
* VSI list.
*/
-static enum ice_status
+static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
u16 s_rule_size;
+ int status;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
@@ -2288,21 +2288,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
- enum ice_status status = 0;
u16 vsi_list_id;
+ int status = 0;
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = fm_list->fltr_info.lkup_type;
vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
@@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -2375,19 +2375,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
-static enum ice_status
+static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *list_elem;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
bool remove_rule = false;
u16 vsi_handle;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
mutex_lock(rule_lock);
list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
if (!list_elem) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
remove_rule = true;
} else if (!list_elem->vsi_list_info) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
} else if (list_elem->vsi_list_info->ref_cnt > 1) {
/* a ref_cnt > 1 indicates that the vsi_list is being
@@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto exit;
}
@@ -2590,7 +2590,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* check for duplicates in this case; removing duplicates from a given
* list should be taken care of by the caller of this function.
*/
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
@@ -2598,12 +2598,12 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
u16 num_unicast = 0;
+ int status = 0;
u8 elem_sent;
if (!m_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
s_rule = NULL;
sw = hw->switch_info;
@@ -2616,23 +2616,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
m_list_itr->fltr_info.flag = ICE_FLTR_TX;
vsi_handle = m_list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
m_list_itr->fltr_info.src = hw_vsi_id;
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */
mutex_lock(rule_lock);
if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
&m_list_itr->fltr_info)) {
mutex_unlock(rule_lock);
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
}
mutex_unlock(rule_lock);
num_unicast++;
@@ -2660,7 +2660,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_add_mac_exit;
}
@@ -2710,7 +2710,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fm_entry), GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_add_mac_exit;
}
fm_entry->fltr_info = *f_info;
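With the switch API now returning plain int, a caller of ice_add_mac() can test against standard errno values directly. A minimal sketch, assuming a caller-built tmp_add_list of struct ice_fltr_list_entry (the list name and the surrounding handling are illustrative, not part of this patch):

	LIST_HEAD(tmp_add_list);	/* entries of struct ice_fltr_list_entry */
	int status;

	/* populate tmp_add_list with MAC filters, then: */
	status = ice_add_mac(hw, &tmp_add_list);
	if (status == -EEXIST)
		status = 0;	/* unicast filter already programmed */
	else if (status)
		dev_err(ice_hw_to_dev(hw),
			"failed to add MAC filters, err %d\n", status);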
@@ -2737,7 +2737,7 @@ ice_add_mac_exit:
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
*/
-static enum ice_status
+static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -2746,10 +2746,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
enum ice_sw_lkup_type lkup_type;
u16 vsi_list_id = 0, vsi_handle;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (new_fltr->src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
lkup_type = new_fltr->lkup_type;
@@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
new_fltr);
if (!v_list_itr) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
/* reuse VSI list for new rule and increment ref_cnt */
@@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto exit;
}
@@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* A rule already exists with the new VSI being added */
if (cur_handle == vsi_handle) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto exit;
}
@@ -2890,16 +2890,16 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(v_list_itr, v_list, list_entry) {
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->fltr_info.flag = ICE_FLTR_TX;
v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
if (v_list_itr->status)
@@ -2917,13 +2917,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(em_list_itr, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2931,7 +2930,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr);
@@ -2946,13 +2945,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
* @hw: pointer to the hardware structure
* @em_list: list of ethertype or ethertype MAC entries
*/
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr, *tmp;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2960,7 +2958,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_remove_rule_internal(hw, l_type,
em_list_itr);
@@ -3020,18 +3018,17 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
- enum ice_status status;
u16 s_rule_size;
u16 hw_vsi_id;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
@@ -3039,7 +3036,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memset(&f_info, 0, sizeof(f_info));
@@ -3137,18 +3134,18 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* This function removes either a MAC filter rule or a specific VSI from a
* VSI list for a multicast MAC address.
*
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
+ * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
+ * be aware that this call will only work if all the entries passed into m_list
+ * were added previously. It will not attempt to do a partial remove of entries
+ * that were found.
*/
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
@@ -3157,11 +3154,11 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
@@ -3174,7 +3171,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
&list_itr->fltr_info)) {
mutex_unlock(rule_lock);
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
mutex_unlock(rule_lock);
}
@@ -3192,19 +3189,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_VLAN,
v_list_itr);
@@ -3242,7 +3238,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
* fltr_info.fwd_id fields. These are set such that later logic can
* extract which VSI to remove the fltr from, and pass on that information.
*/
-static enum ice_status
+static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *vsi_list_head,
struct ice_fltr_info *fi)
@@ -3254,7 +3250,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
*/
tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp->fltr_info = *fi;
@@ -3285,17 +3281,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
* Note that this means all entries in vsi_list_head must be explicitly
* deallocated by the caller when done with the list.
*/
-static enum ice_status
+static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
+ int status = 0;
/* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
@@ -3349,9 +3345,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @recp_id: recipe ID for which the rule needs to be removed
* @v_list: list of promisc entries
*/
-static enum ice_status
-ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
- struct list_head *v_list)
+static int
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
@@ -3371,7 +3366,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
@@ -3381,11 +3376,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct ice_fltr_mgmt_list_entry *itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
@@ -3444,20 +3439,20 @@ free_fltr_list:
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
-enum ice_status
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
- enum ice_status status = 0;
bool is_tx_fltr;
+ int status = 0;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&new_fltr, 0, sizeof(new_fltr));
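The promiscuous-mode helpers follow the same errno convention. A hedged usage sketch, reusing the ICE_PROMISC_VLAN_* mask bits shown above (the surrounding caller is illustrative):

	int status;

	/* request VLAN promiscuous mode for both Rx and Tx */
	status = ice_set_vsi_promisc(hw, vsi_handle,
				     ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX,
				     vid);
	if (status)
		return status;	/* e.g. -EINVAL for an invalid VSI handle */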
@@ -3558,7 +3553,7 @@ set_promisc_exit:
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
{
@@ -3567,8 +3562,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct list_head vsi_list_head;
struct list_head *vlan_head;
struct mutex *vlan_lock; /* Lock to protect filter rule list */
- enum ice_status status;
u16 vlan_id;
+ int status;
INIT_LIST_HEAD(&vsi_list_head);
vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
@@ -3616,7 +3611,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status;
+ int status;
INIT_LIST_HEAD(&remove_list_head);
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
@@ -3681,19 +3676,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* @num_items: number of entries requested for FD resource type
* @counter_id: counter index returned by AQ call
*/
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Allocate resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3719,19 +3714,19 @@ exit:
* @num_items: number of entries to be freed for FD resource type
* @counter_id: counter ID resource which needs to be freed
*/
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Free resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3796,10 +3791,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
+ * @tun_type: type of recipe tunnel
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
-static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+static u16
+ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
+ enum ice_sw_tunnel_type tun_type)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -3860,8 +3858,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
}
/* If for "i"th recipe the found was never set to false
* then it means we found our match
+ * Also tun type of recipe needs to be checked
*/
- if (found)
+ if (found && recp[i].tun_type == tun_type)
return i; /* Return the recipe ID */
}
}
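With tun_type folded into the lookup, two rules that extract identical protocol words no longer collide when they target different tunnel types. A minimal sketch of the updated call, assuming lkup_exts was already filled via ice_fill_valid_words():

	u16 rid;

	/* match on both the extraction sequence and the tunnel type */
	rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
	if (rid == ICE_MAX_NUM_RECIPES) {
		/* no existing recipe matched; create a new one */
	}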
@@ -3937,7 +3936,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
* and start grouping them in 4-word groups. Each group makes up one
* recipe.
*/
-static enum ice_status
+static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
struct ice_prot_lkup_ext *lkup_exts,
struct list_head *rg_list,
@@ -3961,7 +3960,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
sizeof(*entry),
GFP_KERNEL);
if (!entry)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
list_add(&entry->l_entry, rg_list);
grp = &entry->r_group;
(*recp_cnt)++;
@@ -3987,7 +3986,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
-static enum ice_status
+static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
struct list_head *rg_list)
{
@@ -4029,7 +4028,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
* invalid pair
*/
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
}
@@ -4111,7 +4110,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
* @rm: recipe management list entry
* @profiles: bitmap of profiles that will be associated.
*/
-static enum ice_status
+static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
@@ -4119,11 +4118,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
- enum ice_status status;
u16 free_res_idx;
u16 recipe_count;
u8 chain_idx;
u8 recps = 0;
+ int status;
/* When more than one recipe is required, another recipe is needed to
* chain them together. Matching a tunnel metadata ID takes up one of
@@ -4139,22 +4138,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (rm->n_grp_count > 1) {
if (rm->n_grp_count > free_res_idx)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
rm->n_grp_count++;
}
if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
GFP_KERNEL);
if (!buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_mem;
}
@@ -4214,7 +4213,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
*/
if (chain_idx >= ICE_MAX_FV_WORDS) {
ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = ICE_ERR_MAX_LIMIT;
+ status = -ENOSPC;
goto err_unroll;
}
@@ -4245,7 +4244,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
sizeof(buf[0].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
/* Applicable only for ROOT_RECIPE, set the fwd_priority for
@@ -4281,7 +4280,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
sizeof(*last_chain_entry),
GFP_KERNEL);
if (!last_chain_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
last_chain_entry->rid = rid;
@@ -4316,7 +4315,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
sizeof(buf[recps].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
buf[recps].content.act_ctrl_fwd_priority = rm->priority;
@@ -4350,7 +4349,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
if (!idx_found) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto err_unroll;
}
@@ -4403,12 +4402,12 @@ err_mem:
* @rm: recipe management list entry
* @lkup_exts: lookup elements
*/
-static enum ice_status
+static int
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_prot_lkup_ext *lkup_exts)
{
- enum ice_status status;
u8 recp_count = 0;
+ int status;
rm->n_grp_count = 0;
@@ -4438,21 +4437,21 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
* @bm: bitmap of field vectors to consider
* @fv_list: pointer to a list that holds the returned field vectors
*/
-static enum ice_status
+static int
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
unsigned long *bm, struct list_head *fv_list)
{
- enum ice_status status;
u8 *prot_ids;
+ int status;
u16 i;
prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
if (!prot_ids)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < lkups_cnt; i++)
if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto free_mem;
}
@@ -4489,7 +4488,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
*/
-static enum ice_status
+static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
struct ice_prot_lkup_ext *lkup_exts)
{
@@ -4506,7 +4505,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
lkup_exts->field_mask[word] = mask;
} else {
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
}
@@ -4557,7 +4556,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
* @rinfo: other information regarding the rule e.g. priority and action info
* @rid: return the recipe ID of the recipe created
*/
-static enum ice_status
+static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
@@ -4568,16 +4567,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_sw_fv_list_entry *fvit;
struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
- enum ice_status status = 0;
struct ice_sw_recipe *rm;
+ int status = 0;
u8 i;
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
if (!lkup_exts)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Determine the number of words to be matched and if it exceeds a
* recipe's restrictions
@@ -4586,20 +4585,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
count = ice_fill_valid_words(&lkups[i], lkup_exts);
if (!count) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
}
rm = kzalloc(sizeof(*rm), GFP_KERNEL);
if (!rm) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_free_lkup_exts;
}
@@ -4651,11 +4650,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts);
+ *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
if (*rid < ICE_MAX_NUM_RECIPES)
/* Success if we found a recipe that matches the existing criteria */
goto err_unroll;
+ rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
status = ice_add_sw_recipe(hw, rm, profiles);
if (status)
@@ -4844,7 +4844,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt_len: packet length of dummy packet
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
const u8 *dummy_pkt, u16 pkt_len,
@@ -4878,7 +4878,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
}
/* this should never happen in a correct calling sequence */
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
switch (lkups[i].type) {
case ICE_MAC_OFOS:
@@ -4915,12 +4915,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_udp_tnl_hdr);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* the length should be a word multiple */
if (len % ICE_BYTES_PER_WORD)
- return ICE_ERR_CFG;
+ return -EIO;
/* We have the offset to the header start, the length, the
* caller's header values and mask. Use this information to
@@ -4950,7 +4950,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt: dummy packet to fill in
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
@@ -4958,11 +4958,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) {
case ICE_SW_TUN_VXLAN:
+ if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
+ return -EIO;
+ break;
case ICE_SW_TUN_GENEVE:
- if (!ice_get_open_tunnel_port(hw, &open_port))
- return ICE_ERR_CFG;
+ if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
+ return -EIO;
break;
-
default:
/* Nothing needs to be done for this tunnel type */
return 0;
@@ -4982,7 +4984,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
}
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
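ice_get_open_tunnel_port() now takes the tunnel type explicitly, so VXLAN and GENEVE each resolve their own open UDP port instead of sharing one lookup. A hedged sketch of the per-type call, using only names visible in this hunk:

	u16 open_port = 0;

	/* resolve the open UDP port for this specific tunnel type;
	 * failure maps to -EIO, matching the handling above
	 */
	if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
		return -EIO;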
@@ -5047,25 +5049,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
struct ice_adv_fltr_mgmt_list_entry *m_entry,
struct ice_adv_rule_info *cur_fltr,
struct ice_adv_rule_info *new_fltr)
{
- enum ice_status status;
u16 vsi_list_id = 0;
+ int status;
if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -5078,7 +5080,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
new_fltr->sw_act.fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
@@ -5111,7 +5113,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->sw_act.vsi_handle;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -5153,7 +5155,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
* rinfo describes other information related to this rule such as forwarding
* IDs, priority of this rule, etc.
*/
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry)
@@ -5164,10 +5166,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_aqc_sw_rules_elem *s_rule = NULL;
struct list_head *rule_head;
struct ice_switch_info *sw;
- enum ice_status status;
const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
+ int status;
u8 q_rgn;
/* Initialize profile to result index bitmap */
@@ -5177,7 +5179,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* get # of words we need to match */
word_cnt = 0;
@@ -5191,13 +5193,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* make sure that we can locate a dummy packet */
ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
&pkt_offsets);
if (!pkt) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_ice_add_adv_rule;
}
@@ -5205,11 +5207,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_handle = rinfo->sw_act.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -5243,7 +5245,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -5277,7 +5279,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ICE_SINGLE_ACT_VALID_BIT;
break;
default:
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_ice_add_adv_rule;
}
@@ -5322,14 +5324,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
sizeof(struct ice_adv_fltr_mgmt_list_entry),
GFP_KERNEL);
if (!adv_fltr) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
lkups_cnt * sizeof(*lkups), GFP_KERNEL);
if (!adv_fltr->lkups) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
@@ -5372,12 +5374,12 @@ err_ice_add_adv_rule:
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
* It is required to pass valid VSI handle.
*/
-static enum ice_status
+static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
struct list_head *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- enum ice_status status = 0;
+ int status = 0;
u16 hw_vsi_id;
if (list_empty(list_head))
@@ -5426,22 +5428,22 @@ end:
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
struct ice_vsi_list_map_info *vsi_list_info;
enum ice_sw_lkup_type lkup_type;
- enum ice_status status;
u16 vsi_list_id;
+ int status;
if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = ICE_SW_LKUP_LAST;
vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
@@ -5461,7 +5463,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -5525,27 +5527,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* header. rinfo describes other information related to this rule such as
* forwarding IDs, priority of this rule, etc.
*/
-static enum ice_status
+static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
struct ice_adv_fltr_mgmt_list_entry *list_elem;
struct ice_prot_lkup_ext lkup_exts;
- enum ice_status status = 0;
bool remove_rule = false;
struct mutex *rule_lock; /* Lock to protect filter rule list */
u16 i, rid, vsi_handle;
+ int status = 0;
memset(&lkup_exts, 0, sizeof(lkup_exts));
for (i = 0; i < lkups_cnt; i++) {
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST)
- return ICE_ERR_CFG;
+ return -EIO;
count = ice_fill_valid_words(&lkups[i], &lkup_exts);
if (!count)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Create any special protocol/offset pairs, such as looking at tunnel
@@ -5555,10 +5557,10 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
return status;
- rid = ice_find_recp(hw, &lkup_exts);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If we did not find a recipe that matches the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
@@ -5590,7 +5592,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
s_rule->pdata.lkup_tx_rx.act = 0;
s_rule->pdata.lkup_tx_rx.index =
cpu_to_le16(list_elem->rule_info.fltr_rule_id);
@@ -5598,7 +5600,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL);
- if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
mutex_lock(rule_lock);
@@ -5623,7 +5625,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* the remove_entry parameter. This function will remove rule for a given
* vsi_handle with a given rule_id which is passed as parameter in remove_entry
*/
-enum ice_status
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry)
{
@@ -5634,7 +5636,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
sw = hw->switch_info;
if (!sw->recp_list[remove_entry->rid].recp_created)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_head = &sw->recp_list[remove_entry->rid].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->rule_info.fltr_rule_id ==
@@ -5646,7 +5648,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
}
/* either list is empty or unable to find rule */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -5656,10 +5658,10 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
*
* Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
+ int status = 0;
u8 i;
for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8a38906f16f..d8334beaaa8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -256,7 +256,7 @@ struct ice_vsi_list_map_info {
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_status status;
+ int status;
struct ice_fltr_info fltr_info;
};
@@ -302,75 +302,69 @@ enum ice_promisc_flags {
};
/* VSI related commands */
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+int ice_get_initial_sw_cfg(struct ice_hw *hw);
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id);
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry);
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-int
-ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
+int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
-enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
/* Promisc/defport setup for VSIs */
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
-enum ice_status
+int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-enum ice_status
-ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+int ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
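After this header change every exported switch call returns 0 or a negative errno, so callers can propagate the value as-is instead of translating through a driver-private status enum. A representative (hedged) caller shape, with vlan_list standing in for any caller-built filter list:

	int err;

	err = ice_add_vlan(hw, &vlan_list);
	if (err == -EEXIST)
		err = 0;	/* duplicate filter is often tolerable */
	if (err)
		return err;	/* already an errno; no conversion step */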
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e5d23feb6701..e8aab664270a 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}
-static enum ice_protocol_type
-ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
- if (inner) {
- switch (ip_proto) {
- case IPPROTO_UDP:
- return ICE_UDP_ILOS;
- }
- } else {
- switch (ip_proto) {
- case IPPROTO_TCP:
- return ICE_TCP_IL;
- case IPPROTO_UDP:
- return ICE_UDP_OF;
- }
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ return ICE_TCP_IL;
+ case IPPROTO_UDP:
+ return ICE_UDP_ILOS;
}
return 0;
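The collapsed helper maps the IP protocol straight to a lookup type, dropping the inner/outer distinction: UDP now always selects ICE_UDP_ILOS, and the encapsulated destination port is special-cased on ICE_UDP_OF in the next hunk. A small illustration (the list/index variables mirror the caller below):

	/* IPPROTO_TCP -> ICE_TCP_IL, IPPROTO_UDP -> ICE_UDP_ILOS, else 0 */
	list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
	if (!list[i].type)
		return -EINVAL;	/* unsupported L4 protocol (sketch only) */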
@@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
- list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+ if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
+ hdr->l3_key.ip_proto == IPPROTO_UDP) {
+ list[i].type = ICE_UDP_OF;
list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
i++;
@@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
struct ice_tc_l4_hdr *l4_key, *l4_mask;
- list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+ list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
l4_key = &headers->l4_key;
l4_mask = &headers->l4_mask;
@@ -405,9 +398,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_hw *hw = &vsi->back->hw;
struct ice_adv_lkup_elem *list;
u32 flags = fltr->flags;
- enum ice_status status;
int lkups_cnt;
- int ret = 0;
+ int ret;
int i;
if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
@@ -456,14 +448,13 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
- status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
- } else if (status) {
+ } else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
@@ -802,7 +793,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
headers->l3_mask.ttl = match.mask->ttl;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+ fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
@@ -1168,7 +1160,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.vsi_handle = fltr->dest_id;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
- if (err == ICE_ERR_DOES_NOT_EXIST) {
+ if (err == -ENOENT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9e0c2923c62e..b8644adfba78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -7,7 +7,6 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
-#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
@@ -873,8 +872,6 @@ struct ice_hw {
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
- enum ice_aq_err pkg_dwnld_status;
-
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
@@ -919,6 +916,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
u16 io_expander_handle;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index eee180d8c024..d64df81d4893 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -47,197 +47,6 @@ struct virtchnl_fdir_fltr_conf {
u32 flow_id;
};
-static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_GTPU_EH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-struct virtchnl_fdir_pattern_match_item {
- enum virtchnl_proto_hdr_type *list;
- u64 input_set;
- u64 *meta;
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
- {vc_pattern_ether, 0, NULL},
- {vc_pattern_ipv4_gtpu, 0, NULL},
- {vc_pattern_ipv4_gtpu_eh, 0, NULL},
- {vc_pattern_ipv4_l2tpv3, 0, NULL},
- {vc_pattern_ipv6_l2tpv3, 0, NULL},
- {vc_pattern_ipv4_esp, 0, NULL},
- {vc_pattern_ipv6_esp, 0, NULL},
- {vc_pattern_ipv4_ah, 0, NULL},
- {vc_pattern_ipv6_ah, 0, NULL},
- {vc_pattern_ipv4_nat_t_esp, 0, NULL},
- {vc_pattern_ipv6_nat_t_esp, 0, NULL},
- {vc_pattern_ipv4_pfcp, 0, NULL},
- {vc_pattern_ipv6_pfcp, 0, NULL},
-};
-
struct virtchnl_fdir_inset_map {
enum virtchnl_proto_hdr_field field;
enum ice_flow_field fld;
@@ -751,7 +560,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_flow_seg_info *old_seg;
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *vf_prof;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -794,29 +602,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
tun ? ICE_FLTR_PTYPE_MAX : 0);
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ tun + 1, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id);
goto err_prof;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
if (ret) {
dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
@@ -911,83 +716,6 @@ err_exit:
}
/**
- * ice_vc_fdir_match_pattern
- * @fltr: virtual channel add cmd buffer
- * @type: virtual channel protocol filter header type
- *
- * Matching the header type by comparing fltr and type's value.
- *
- * Return: true on success, and false on error.
- */
-static bool
-ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
- enum virtchnl_proto_hdr_type *type)
-{
- struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
- int i = 0;
-
- while ((i < proto->count) &&
- (*type == proto->proto_hdr[i].type) &&
- (*type != VIRTCHNL_PROTO_HDR_NONE)) {
- type++;
- i++;
- }
-
- return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
-}
-
-/**
- * ice_vc_fdir_get_pattern - get while list pattern
- * @vf: pointer to the VF info
- * @len: filter list length
- *
- * Return: pointer to allowed filter list
- */
-static const struct virtchnl_fdir_pattern_match_item *
-ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
-{
- const struct virtchnl_fdir_pattern_match_item *item;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- item = vc_fdir_pattern_comms;
- *len = ARRAY_SIZE(vc_fdir_pattern_comms);
- } else {
- item = vc_fdir_pattern_os;
- *len = ARRAY_SIZE(vc_fdir_pattern_os);
- }
-
- return item;
-}
-
-/**
- * ice_vc_fdir_search_pattern
- * @vf: pointer to the VF info
- * @fltr: virtual channel add cmd buffer
- *
- * Search for matched pattern from supported pattern list
- *
- * Return: 0 on success, and other on error.
- */
-static int
-ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
-{
- const struct virtchnl_fdir_pattern_match_item *pattern;
- int len, i;
-
- pattern = ice_vc_fdir_get_pattern(vf, &len);
-
- for (i = 0; i < len; i++)
- if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
- return 0;
-
- return -EINVAL;
-}
-
-/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -1299,11 +1027,11 @@ static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- ret = ice_vc_fdir_search_pattern(vf, fltr);
- if (ret)
- return ret;
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1467,7 +1195,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
struct ice_fdir_fltr *input = &conf->input;
struct ice_vsi *vsi, *ctrl_vsi;
struct ice_fltr_desc desc;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -1497,8 +1224,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- ret = ice_status_to_errno(status);
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 217ff5e9a6f1..61b2db3342ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -9,6 +9,7 @@
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
#define FIELD_SELECTOR(proto_hdr_field) \
BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
@@ -18,18 +19,7 @@ struct ice_vc_hdr_match_type {
u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
{VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
{VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
{VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
@@ -67,83 +57,7 @@ struct ice_vc_hash_field_match_type {
};
static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
@@ -289,37 +203,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_err_to_virt_err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -770,8 +653,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
struct ice_hw *hw = &vsi->back->hw;
struct ice_aqc_vsi_props *info;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -794,12 +676,10 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -968,8 +848,8 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- enum ice_status status;
u8 broadcast[ETH_ALEN];
+ int status;
if (ice_is_eswitch_mode_switchdev(vf->pf))
return 0;
@@ -977,9 +857,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
- vf->vf_id, ice_stat_str(status));
- return ice_status_to_errno(status);
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
}
vf->num_mac++;
@@ -988,10 +868,10 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
&vf->hw_lan_addr.addr[0], vf->vf_id,
- ice_stat_str(status));
- return ice_status_to_errno(status);
+ status);
+ return status;
}
vf->num_mac++;
@@ -1341,45 +1221,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
ice_flush(hw);
}
-/**
- * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @promisc_m: mask of promiscuous config bits
- * @rm_promisc: promisc flag request from the VF to remove or add filter
- *
- * This function configures VF VSI promiscuous mode, based on the VF requests,
- * for Unicast, Multicast and VLAN
- */
-static enum ice_status
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
- bool rm_promisc)
+static int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_pf *pf = vf->pf;
- enum ice_status status = 0;
- struct ice_hw *hw;
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
- hw = &pf->hw;
- if (vsi->num_vlan) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- rm_promisc);
- } else if (vf->port_vlan_info) {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- } else {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
+ if (vf->port_vlan_info)
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_info & VLAN_VID_MASK);
+ else if (vsi->num_vlan > 1)
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
}
- return status;
+ return 0;
+}
+
+static int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (vf->port_vlan_info)
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_info & VLAN_VID_MASK);
+ else if (vsi->num_vlan > 1)
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
}
static void ice_vf_clear_counters(struct ice_vf *vf)
@@ -1415,8 +1300,8 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
if (!vsi->agg_node)
return;
@@ -1617,6 +1502,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vc_set_default_allowlist(vf);
ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VFs since it should be
* setup only when VF creates its first FDIR rule.
*/
@@ -1742,11 +1628,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
dev_err(dev, "disabling promiscuous mode failed\n");
}
ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
* only when VF creates its first FDIR rule.
*/
@@ -1844,7 +1731,6 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
u8 broadcast[ETH_ALEN];
- enum ice_status status;
struct ice_vsi *vsi;
struct device *dev;
int err;
@@ -1864,11 +1750,10 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
}
eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
- vf->vf_id, ice_stat_str(status));
- err = ice_status_to_errno(status);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+ vf->vf_id, err);
goto release_vsi;
}
@@ -2021,6 +1906,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
if (ret)
goto err_unroll_sriov;
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
return 0;
err_unroll_sriov:
@@ -2110,7 +1999,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int err;
err = ice_check_sriov_allowed(pf);
@@ -2130,9 +2018,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
return -EBUSY;
}
- status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (status)
- return ice_status_to_errno(status);
+ err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (err)
+ return err;
err = ice_pci_sriov_ena(pf, num_vfs);
if (err) {
@@ -2266,9 +2154,9 @@ int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
- enum ice_status aq_ret;
struct device *dev;
struct ice_pf *pf;
+ int aq_ret;
if (!vf)
return -EINVAL;
@@ -2300,8 +2188,8 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
- vf->vf_id, ice_stat_str(aq_ret),
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
+ vf->vf_id, aq_ret,
ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -2550,6 +2438,100 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len)
}
/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
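Both call sites added for this validator follow the same pattern: reject the virtchnl request up front when the active DDP package does not enable the derived packet type. From the RSS path later in this diff:

	if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}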
+
+/**
* ice_vc_parse_rss_cfg - parses hash fields and headers from
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
@@ -2572,18 +2554,10 @@ ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- hf_list = ice_vc_hash_field_list_comms;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
- hdr_list = ice_vc_hdr_list_comms;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
- } else {
- hf_list = ice_vc_hash_field_list_os;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
- hdr_list = ice_vc_hdr_list_os;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
- }
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
struct virtchnl_proto_hdr *proto_hdr =
@@ -2685,10 +2659,15 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
struct ice_vsi_ctx *ctx;
- enum ice_status status;
u8 lut_type, hash_type;
+ int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
@@ -2717,9 +2696,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
} else {
vsi->info.q_opt_rss = ctx->info.q_opt_rss;
@@ -2744,19 +2722,18 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
vsi->vsi_num, v_ret);
}
} else {
- enum ice_status status;
+ int status;
status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs);
- /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
- * if two configurations share the same profile remove
- * one of them actually removes both, since the
- * profile is deleted.
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
*/
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ if (status && status != -ENOENT) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
- vf->vf_id, ice_stat_str(status));
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
}
}
}
@@ -2914,7 +2891,6 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
struct ice_pf *pf = np->vsi->back;
struct ice_vsi_ctx *ctx;
struct ice_vsi *vf_vsi;
- enum ice_status status;
struct device *dev;
struct ice_vf *vf;
int ret;
@@ -2964,12 +2940,10 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
}
- status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
- ice_stat_str(status));
- ret = -EIO;
+ ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret);
goto out;
}
@@ -3015,10 +2989,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- enum ice_status mcast_status = 0, ucast_status = 0;
bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
+ int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
@@ -3100,24 +3074,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
ucast_m = ICE_UCAST_PROMISC_BITS;
}
- ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m,
- !alluni);
- if (ucast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- alluni ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(ucast_status);
- }
+ if (alluni)
+ ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
+ else
+ ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
- mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m,
- !allmulti);
- if (mcast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- allmulti ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(mcast_status);
- }
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ucast_err || mcast_err)
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
- if (!mcast_status) {
+ if (!mcast_err) {
if (allmulti &&
!test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
@@ -3127,7 +3098,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id);
}
- if (!ucast_status) {
+ if (!ucast_err) {
if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
vf->vf_id);
@@ -3807,8 +3778,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
{
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
- int ret = 0;
+ int ret;
/* device MAC already added */
if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
@@ -3819,18 +3789,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return -EPERM;
}
- status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (ret == -EEXIST) {
dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
/* don't return since we might need to update
* the primary MAC in ice_vfhw_mac_add() below
*/
- ret = -EEXIST;
- } else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
- return -EIO;
+ } else if (ret) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, ret);
+ return ret;
} else {
vf->num_mac++;
}
@@ -3907,20 +3876,20 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
{
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
+ int status;
if (!ice_can_vf_change_mac(vf) &&
ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
+ if (status == -ENOENT) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
return -EIO;
}
@@ -5283,9 +5252,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
s16 vf_id = le16_to_cpu(event->desc.retval);
struct device *dev = ice_pf_to_dev(pf);
struct ice_mbx_data mbxdata;
- enum ice_status status;
bool malvf = false;
struct ice_vf *vf;
+ int status;
if (ice_validate_vf_id(pf, vf_id))
return false;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 7e28ecbbe7af..752487a1bdd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -203,6 +203,8 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index ff55cb415b11..bb9a80847298 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
while (i--) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.status_error0 = 0;
rx_desc++;
xdp++;
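A note on the one-line fix above, as I read it: a recycled zero-copy descriptor still carries the write-back status of its previous completion.

	/* Without clearing wb.status_error0, the stale DD (descriptor
	 * done) bit could make the next clean cycle treat this freshly
	 * re-posted descriptor as an already-completed one.
	 */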
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0011b15e678c..0ac4cc5eaa2d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1015,10 +1015,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
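The same removal repeats in igc, ixgbe, mvpp2, otx2 and mlx4 below. As of this series, hwtstamp flag bits are defined (HWTSTAMP_FLAG_BONDED_PHC_INDEX), so hard-failing on any nonzero config->flags would break valid requests; validation of unknown bits is assumed to live in the SIOCSHWTSTAMP core path instead, roughly:

	/* Sketch of the core-side check; drivers never see undefined
	 * flag bits.
	 */
	if (cfg.flags & ~(__u32)HWTSTAMP_FLAG_MASK)
		return -EINVAL;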
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e448288ee26..142c57b7a451 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1718,24 +1718,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- union igc_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGC_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGC_SKB_PAD);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, size);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
igc_rx_buffer_flip(rx_buffer, truesize);
return skb;
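The metasize plumbing above is the driver half of XDP metadata: whatever an XDP program reserves in front of data via bpf_xdp_adjust_meta() is now carried into the skb by skb_metadata_set(). A minimal, hypothetical program side (the 4-byte value and names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_put_meta(struct xdp_md *ctx)
	{
		__u32 *meta;

		/* Grow the metadata area by 4 bytes (negative delta). */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
			return XDP_PASS;

		meta = (void *)(long)ctx->data_meta;
		if ((void *)(meta + 1) > (void *)(long)ctx->data)
			return XDP_PASS; /* verifier-required bounds check */

		*meta = 0xcafe;	/* lands in skb metadata via igc_build_skb() */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";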
@@ -1746,6 +1748,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct xdp_buff *xdp,
ktime_t timestamp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
void *va = xdp->data;
@@ -1753,10 +1756,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ IGC_RX_HDR_LEN + metasize);
if (unlikely(!skb))
return NULL;
@@ -1769,7 +1773,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* update all of the pointers */
size -= headlen;
@@ -2354,7 +2364,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
- igc_rx_offset(rx_ring) + pkt_offset, size, false);
+ igc_rx_offset(rx_ring) + pkt_offset,
+ size, true);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2378,7 +2389,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
else
skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
timestamp);
@@ -2448,8 +2459,10 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
- if (metasize)
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
return skb;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 30568e3544cd..71813fa8f928 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -560,10 +560,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct hwtstamp_config *config)
{
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
igc_ptp_disable_tx_timestamp(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 23ddfd79fc8b..336426a67ac1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -992,10 +992,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2368ae3f0e10..e3c5d64ba340 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3948,7 +3948,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII mode receives the state from the PHY */
new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_clk = MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
MVNETA_GMAC_FORCE_LINK_PASS |
MVNETA_GMAC_CONFIG_MII_SPEED |
@@ -3960,7 +3960,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
} else {
/* 802.3z negotiation - only 1000base-X */
new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_clk = MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
MVNETA_GMAC_FORCE_LINK_PASS |
MVNETA_GMAC_CONFIG_MII_SPEED)) |
@@ -3973,6 +3973,10 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
}
+ /* Set the 1ms clock divisor */
+ if (new_clk == MVNETA_GMAC_1MS_CLOCK_ENABLE)
+ new_clk |= clk_get_rate(pp->clk) / 1000;
+
/* Armada 370 documentation says we can only change the port mode
* and in-band enable when the link is down, so force it down
* while making these changes. We also do this for GMAC_CTRL2
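A recap of the clock arithmetic in this hunk, as I read the register layout: the 1 ms clock enable bit shares a register value with its divisor, which is why the earlier |= became a plain assignment and the divisor is OR-ed in afterwards.

	/* new_clk = MVNETA_GMAC_1MS_CLOCK_ENABLE;    start from a clean value
	 * new_clk |= clk_get_rate(pp->clk) / 1000;   ticks per 1 ms
	 *
	 * e.g. a 250 MHz core clock yields a divisor of 250000.
	 */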
@@ -5271,6 +5275,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ pp->phylink_config.legacy_pre_march2020 = true;
pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
MAC_100 | MAC_1000FD | MAC_2500FD;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index a48e804c46f2..b1cce4425296 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2963,11 +2963,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
if (priv->percpu_pools) {
- err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
if (err < 0)
goto err_free_dma;
- err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
if (err < 0)
goto err_unregister_rxq_short;
@@ -5142,9 +5142,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -7454,7 +7451,7 @@ static int mvpp2_probe(struct platform_device *pdev)
shared = num_present_cpus() - priv->nthreads;
if (shared > 0)
- bitmap_fill(&priv->lock_map,
+ bitmap_set(&priv->lock_map, 0,
min_t(int, shared, MVPP2_MAX_THREADS));
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
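Why bitmap_set() and not bitmap_fill(): bitmap_fill() writes whole longs, so it can set bits beyond the requested count, while bitmap_set(map, 0, n) sets exactly the first n bits.

	/* Example with shared = 2:
	 * bitmap_fill(&map, 2)   sets bits 0..63 (it writes whole longs)
	 * bitmap_set(&map, 0, 2) sets bits 0 and 1 only
	 */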
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index cb56e171ddd4..3ca6b942ebe2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
- return err;
+ goto free_regions;
}
mw->mbox_wq = alloc_workqueue(name,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 1333edf1c361..6080ebd9bd94 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2002,10 +2002,6 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
otx2_config_hw_tx_tstamp(pfvf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 0ef68fdd1f26..61c20907315f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -5,6 +5,8 @@
*
*/
+#include <linux/module.h>
+
#include "otx2_common.h"
#include "otx2_ptp.h"
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index da0b6525ef9a..fc7f2fedafd7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -419,11 +419,8 @@ struct prestera_acl_rule_entry *
prestera_acl_rule_entry_find(struct prestera_acl *acl,
struct prestera_acl_rule_entry_key *key)
{
- struct prestera_acl_rule_entry *e;
-
- e = rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key,
- __prestera_acl_rule_entry_ht_params);
- return IS_ERR(e) ? NULL : e;
+ return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key,
+ __prestera_acl_rule_entry_ht_params);
}
static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 92cb5e9099c6..6282c9822e2b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -443,7 +443,7 @@ struct prestera_msg_counter_resp {
__le32 offset;
__le32 num_counters;
__le32 done;
- struct prestera_msg_counter_stats stats[0];
+ struct prestera_msg_counter_stats stats[];
};
struct prestera_msg_span_req {
@@ -1900,7 +1900,7 @@ int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
.block_id = __cpu_to_le32(idx),
.num_counters = __cpu_to_le32(*len),
};
- size_t size = sizeof(*resp) + sizeof(*resp->stats) * (*len);
+ size_t size = struct_size(resp, stats, *len);
int err, i;
resp = kmalloc(size, GFP_KERNEL);
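struct_size() is the overflow-checked replacement for the open-coded multiply-and-add, and it pairs with the stats[0] to stats[] flexible-array conversion above:

	/* Before: sizeof(*resp) + sizeof(*resp->stats) * (*len)
	 *         (an unchecked multiplication that can overflow size_t)
	 * After:  struct_size(resp, stats, *len)
	 *         (saturates to SIZE_MAX on overflow, so kmalloc() fails
	 *          cleanly instead of under-allocating)
	 */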
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index c357c193378e..86d356b4388d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a Mediatek SoC with ethernet, say Y.
@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
+ select PINCTRL
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index de4152e2e3e4..a068cf5c970f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2923,6 +2923,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ /* This driver makes use of state->speed/state->duplex in
+ * mac_config
+ */
+ mac->phylink_config.legacy_pre_march2020 = true;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e97d97e94231..ed5038d98ef6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
- ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
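A summary of the ethtool link-mode corrections in this hunk:

	/* The three fixes above stop reporting backplane (KX/KR) link
	 * modes for media that are not backplane:
	 *   MLX4_1000BASE_CX_SGMII -> 1000baseX   (was 1000baseKX)
	 *   MLX4_10GBASE_CR        -> 10000baseCR (was 10000baseKR)
	 *   MLX4_10GBASE_SR        -> 10000baseSR (was 10000baseKR)
	 */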
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 3f6d5c384637..ad1e4caf48bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
bool carry_xdp_prog)
{
struct bpf_prog *xdp_prog;
- int i, t;
+ int i, t, ret;
- mlx4_en_copy_priv(tmp, priv, prof);
+ ret = mlx4_en_copy_priv(tmp, priv, prof);
+ if (ret) {
+ en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+ __func__);
+ return ret;
+ }
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
@@ -2422,10 +2427,6 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* device doesn't support time stamping */
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 92056452a9e3..4ba1a78c6515 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -115,6 +115,7 @@ config MLX5_TC_CT
config MLX5_TC_SAMPLE
bool "MLX5 TC sample offload support"
depends on MLX5_CLS_ACT
+ depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m
default y
help
Say Y here if you want to support offloading sample rules via tc
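The added dependency line is the usual Kconfig guard against a builtin caller of a modular provider:

	# MLX5_TC_SAMPLE is a bool, so it may only be enabled when PSAMPLE
	# is builtin (=y), absent (=n), or when mlx5_core itself is a
	# module; otherwise a builtin mlx5 would reference symbols in a
	# modular psample and fail to link.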
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index e63bb9ceb9c0..e592e0955c71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o
+
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
+ en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
+ en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
+ en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
+ en/tc/act/mirred.o en/tc/act/mirred_nic.o \
+ en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
+ en/tc/act/redirect_ingress.o
+
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8eaa24d865c5..a46284ca5172 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_SF:
case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_DESTROY_UMEM:
+ case MLX5_CMD_OP_MODIFY_RQT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_TIS:
case MLX5_CMD_OP_QUERY_TIS:
case MLX5_CMD_OP_CREATE_RQT:
- case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 48b12ee44b8d..e77c4159713f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -173,7 +173,7 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+#define MLX5E_MAX_KLM_PER_WQE \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -1057,7 +1057,6 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index f8c29022dbb2..66180ffb4606 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -717,7 +717,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 142953847996..0015a81eb9a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
unsigned int max_nch;
u32 drop_rqn;
+ struct mlx5e_packet_merge_param pkt_merge_param;
+ struct rw_semaphore pkt_merge_param_sem;
+
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
if (err)
goto out;
+ /* Separated from the channels RQs, does not share pkt_merge state with them */
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
+ res->pkt_merge_param = *init_pkt_merge_param;
+ init_rwsem(&res->pkt_merge_param_sem);
+
err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
if (!builder)
return -ENOMEM;
+ down_write(&res->pkt_merge_param_sem);
+ res->pkt_merge_param = *pkt_merge_param;
+
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
}
}
+ up_write(&res->pkt_merge_param_sem);
mlx5e_tir_builder_free(builder);
return final_err;
}
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
{
return mlx5e_rss_get_hash(res->rss[0]);
}
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+ struct mlx5e_tir *tir)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ u32 rqtn;
+ int err;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+ inner_ft_support);
+ mlx5e_tir_builder_build_direct(builder);
+ mlx5e_tir_builder_build_tls(builder);
+ down_read(&res->pkt_merge_param_sem);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+ err = mlx5e_tir_init(tir, builder, res->mdev, false);
+ up_read(&res->pkt_merge_param_sem);
+
+ mlx5e_tir_builder_free(builder);
+
+ return err;
+}
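A recap of the pkt_merge_param locking introduced across the hunks above:

	/* - mlx5e_rx_res_packet_merge_set_param(): takes the semaphore
	 *   for write around caching the new params and rebuilding the
	 *   channel TIRs;
	 * - mlx5e_rx_res_tls_tir_create(): takes it for read while
	 *   building a TLS TIR from the cached params.
	 * A TLS TIR created concurrently with an LRO/packet-merge
	 * reconfiguration therefore never observes half-updated state.
	 */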
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index d09f7d174a51..b39b20a720e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+ struct mlx5e_tir *tir);
#endif /* __MLX5_EN_RX_RES_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
new file mode 100644
index 000000000000..b0de6b999675
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_accept = {
+ .can_offload = tc_act_can_offload_accept,
+ .parse_action = tc_act_parse_accept,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
new file mode 100644
index 000000000000..e600924e30ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "mlx5_core.h"
+
+/* Must be aligned with enum flow_action_id. */
+static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
+ &mlx5e_tc_act_accept,
+ &mlx5e_tc_act_drop,
+ &mlx5e_tc_act_trap,
+ &mlx5e_tc_act_goto,
+ &mlx5e_tc_act_mirred,
+ &mlx5e_tc_act_mirred,
+ &mlx5e_tc_act_redirect_ingress,
+ NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan_mangle,
+ &mlx5e_tc_act_tun_encap,
+ &mlx5e_tc_act_tun_decap,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_csum,
+ NULL, /* FLOW_ACTION_MARK, */
+ &mlx5e_tc_act_ptype,
+ NULL, /* FLOW_ACTION_PRIORITY, */
+ NULL, /* FLOW_ACTION_WAKE, */
+ NULL, /* FLOW_ACTION_QUEUE, */
+ &mlx5e_tc_act_sample,
+ NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_ct,
+ NULL, /* FLOW_ACTION_CT_METADATA, */
+ &mlx5e_tc_act_mpls_push,
+ &mlx5e_tc_act_mpls_pop,
+};
+
+/* Must be aligned with enum flow_action_id. */
+static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
+ &mlx5e_tc_act_accept,
+ &mlx5e_tc_act_drop,
+ NULL, /* FLOW_ACTION_TRAP, */
+ &mlx5e_tc_act_goto,
+ &mlx5e_tc_act_mirred_nic,
+ NULL, /* FLOW_ACTION_MIRRED, */
+ NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
+ NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
+ NULL, /* FLOW_ACTION_VLAN_PUSH, */
+ NULL, /* FLOW_ACTION_VLAN_POP, */
+ NULL, /* FLOW_ACTION_VLAN_MANGLE, */
+ NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
+ NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_csum,
+ &mlx5e_tc_act_mark,
+ NULL, /* FLOW_ACTION_PTYPE, */
+ NULL, /* FLOW_ACTION_PRIORITY, */
+ NULL, /* FLOW_ACTION_WAKE, */
+ NULL, /* FLOW_ACTION_QUEUE, */
+ NULL, /* FLOW_ACTION_SAMPLE, */
+ NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_ct,
+};
+
+/**
+ * mlx5e_tc_act_get() - Get an action parser for an action id.
+ * @act_id: Flow action id.
+ * @ns_type: flow namespace type.
+ */
+struct mlx5e_tc_act *
+mlx5e_tc_act_get(enum flow_action_id act_id,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_act **tc_acts;
+
+ tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic;
+
+ return tc_acts[act_id];
+}
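For orientation, the intended caller shape (a sketch; the surrounding variable names are illustrative, while flow_action_for_each() and the two ops are taken from this series):

	flow_action_for_each(i, act, flow_action) {
		struct mlx5e_tc_act *tc_act;

		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act || !tc_act->can_offload(parse_state, act, i))
			return -EOPNOTSUPP;	/* NULL slot: not supported */

		err = tc_act->parse_action(parse_state, act, priv, attr);
		if (err)
			return err;
	}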
+
+/**
+ * mlx5e_tc_act_init_parse_state() - Init a new parse_state.
+ * @parse_state: Parsing state.
+ * @flow: mlx5e tc flow being handled.
+ * @flow_action: flow action to parse.
+ * @extack: to set an error msg.
+ *
+ * The same parse_state should be passed to action parsers
+ * for tracking the current parsing state.
+ */
+void
+mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_tc_flow *flow,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ memset(parse_state, 0, sizeof(*parse_state));
+ parse_state->flow = flow;
+ parse_state->num_actions = flow_action->num_entries;
+ parse_state->extack = extack;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
new file mode 100644
index 000000000000..26efa33de56f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_H__
+#define __MLX5_EN_TC_ACT_H__
+
+#include <net/tc_act/tc_pedit.h>
+#include <net/flow_offload.h>
+#include <linux/netlink.h>
+#include "eswitch.h"
+#include "pedit.h"
+
+struct mlx5_flow_attr;
+
+struct mlx5e_tc_act_parse_state {
+ unsigned int num_actions;
+ struct mlx5e_tc_flow *flow;
+ struct netlink_ext_ack *extack;
+ bool encap;
+ bool decap;
+ bool mpls_push;
+ bool ptype_host;
+ const struct ip_tunnel_info *tun_info;
+ struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
+ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
+ int if_count;
+ struct mlx5_tc_ct_priv *ct_priv;
+};
+
+struct mlx5e_tc_act {
+ bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index);
+
+ int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr);
+
+ int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr);
+};
+
+extern struct mlx5e_tc_act mlx5e_tc_act_drop;
+extern struct mlx5e_tc_act mlx5e_tc_act_trap;
+extern struct mlx5e_tc_act mlx5e_tc_act_accept;
+extern struct mlx5e_tc_act mlx5e_tc_act_mark;
+extern struct mlx5e_tc_act mlx5e_tc_act_goto;
+extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap;
+extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap;
+extern struct mlx5e_tc_act mlx5e_tc_act_csum;
+extern struct mlx5e_tc_act mlx5e_tc_act_pedit;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
+extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
+extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
+extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
+extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
+extern struct mlx5e_tc_act mlx5e_tc_act_ct;
+extern struct mlx5e_tc_act mlx5e_tc_act_sample;
+extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
+extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+
+struct mlx5e_tc_act *
+mlx5e_tc_act_get(enum flow_action_id act_id,
+ enum mlx5_flow_namespace_type ns_type);
+
+void
+mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_tc_flow *flow,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
new file mode 100644
index 000000000000..29920ef0180a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/tc_act/tc_csum.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+csum_offload_supported(struct mlx5e_priv *priv,
+ u32 action,
+ u32 update_flags,
+ struct netlink_ext_ack *extack)
+{
+ u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+ TCA_CSUM_UPDATE_FLAG_UDP;
+
+	/* The HW recalculates checksums only when rewriting headers */
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC csum action is only offloaded with pedit");
+ netdev_warn(priv->netdev,
+ "TC csum action is only offloaded with pedit\n");
+ return false;
+ }
+
+ if (update_flags & ~prot_flags) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload TC csum action for some headers");
+		netdev_warn(priv->netdev,
+			    "can't offload TC csum action for some headers - flags %#x\n",
+ update_flags);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ return csum_offload_supported(flow->priv, flow->attr->action,
+ act->csum_flags, parse_state->extack);
+}
+
+static int
+tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_csum = {
+ .can_offload = tc_act_can_offload_csum,
+ .parse_action = tc_act_parse_csum,
+};
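
csum is effectively a validation-only action here: tc_act_parse_csum() sets no
flags because the hardware recomputes the supported checksums as a side effect
of a MOD_HDR rewrite, so all the work happens in can_offload(). A standalone
userspace sketch of the same flags test, assuming plain C outside the kernel
and the uapi flag names from <linux/tc_act/tc_csum.h>; csum_flags_offloadable()
is an illustrative name:

	#include <linux/tc_act/tc_csum.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Mirror of csum_offload_supported(): only IPv4 header, TCP and UDP
	 * checksum updates fall inside the offloadable set.
	 */
	static bool csum_flags_offloadable(unsigned int update_flags)
	{
		unsigned int prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR |
					  TCA_CSUM_UPDATE_FLAG_TCP |
					  TCA_CSUM_UPDATE_FLAG_UDP;

		return !(update_flags & ~prot_flags);
	}

	int main(void)
	{
		/* SCTP CRC updates are outside the offloadable set: prints 0 */
		printf("%d\n", csum_flags_offloadable(TCA_CSUM_UPDATE_FLAG_SCTP));
		return 0;
	}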
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
new file mode 100644
index 000000000000..06ec30cdb269
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (flow_flag_test(parse_state->flow, SAMPLE)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sample action with connection tracking is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
+ &attr->parse_attr->mod_hdr_acts,
+ act, parse_state->extack);
+ if (err)
+ return err;
+
+ flow_flag_set(parse_state->flow, CT);
+
+ if (mlx5e_is_eswitch_flow(parse_state->flow))
+ attr->esw_attr->split_count = attr->esw_attr->out_count;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
+ .parse_action = tc_act_parse_ct,
+};
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
new file mode 100644
index 000000000000..2e29a23bed12
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_drop = {
+ .can_offload = tc_act_can_offload_drop,
+ .parse_action = tc_act_parse_drop,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
new file mode 100644
index 000000000000..f44515061228
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "eswitch.h"
+
+static int
+validate_goto_chain(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ bool is_esw = mlx5e_is_eswitch_flow(flow);
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_eswitch *esw;
+ u32 reformat_and_fwd;
+ u32 max_chain;
+
+ esw = priv->mdev->priv.eswitch;
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ max_chain = mlx5_chains_get_chain_range(chains);
+ reformat_and_fwd = is_esw ?
+ MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_chains_backwards_supported(chains) &&
+ dest_chain <= flow->attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !reformat_and_fwd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ if (validate_goto_chain(flow->priv, flow, act, extack))
+ return false;
+
+ return true;
+}
+
+static int
+tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = act->chain_index;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ if (!attr->dest_chain)
+ return 0;
+
+ if (parse_state->decap) {
+		/* This could be supported if we created a mapping for
+		 * the tunnel device only (without the tunnel), and set
+		 * that tunnel id on this decap flow.
+		 *
+		 * On restore (miss), we would just set this saved tunnel
+		 * device.
+		 */
+
+ NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported");
+		netdev_warn(priv->netdev, "Decap with goto isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_goto = {
+ .can_offload = tc_act_can_offload_goto,
+ .parse_action = tc_act_parse_goto,
+ .post_parse = tc_act_post_parse_goto,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
new file mode 100644
index 000000000000..d775c3d9edf3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en_tc.h"
+
+static bool
+tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->nic_attr->flow_tag = act->mark;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mark = {
+ .can_offload = tc_act_can_offload_mark,
+ .parse_action = tc_act_parse_mark,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
new file mode 100644
index 000000000000..a0832b86016c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
+#include <net/bareudp.h>
+#include <net/bonding.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_tun_encap.h"
+#include "en/tc_priv.h"
+#include "en_rep.h"
+
+static bool
+same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
+{
+ return mlx5e_eswitch_vf_rep(priv->netdev) &&
+ priv->netdev == out_dev;
+}
+
+static int
+verify_uplink_forwarding(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rep_priv;
+
+	/* Forwarding non-encapsulated traffic between
+	 * uplink ports is allowed only if the
+	 * termination_table_raw_traffic cap is set.
+	 *
+	 * The input vport was stored in attr->in_rep.
+	 * In the LAG case, *priv* is the private data of
+	 * the uplink, which may not be the input vport.
+	 */
+ rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep);
+
+ if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+ mlx5e_eswitch_uplink_rep(out_dev)))
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
+ termination_table_raw_traffic)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are both uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ } else if (out_dev != rep_priv->netdev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not the same uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static bool
+is_duplicated_output_device(struct net_device *dev,
+ struct net_device *out_dev,
+ int *ifindexes, int if_count,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < if_count; i++) {
+ if (ifindexes[i] == out_dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device");
+ netdev_err(dev, "can't duplicate output to same device: %s\n",
+ out_dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct net_device *
+get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
+{
+ struct net_device *fdb_out_dev = out_dev;
+ struct net_device *uplink_upper;
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ if (uplink_upper && netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev) {
+ fdb_out_dev = uplink_dev;
+ } else if (netif_is_lag_master(out_dev)) {
+ fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
+ if (fdb_out_dev &&
+ (!mlx5e_eswitch_rep(fdb_out_dev) ||
+ !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
+ fdb_out_dev = NULL;
+ }
+ rcu_read_unlock();
+ return fdb_out_dev;
+}
+
+static bool
+tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct net_device *out_dev = act->dev;
+ struct mlx5e_priv *priv = flow->priv;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ parse_attr = flow->attr->parse_attr;
+ esw_attr = flow->attr->esw_attr;
+
+ if (!out_dev) {
+		/* out_dev is NULL when filters with a
+		 * non-existing mirred device are replayed to
+		 * the driver.
+		 */
+ return false;
+ }
+
+ if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device");
+ return false;
+ }
+
+ if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
+ /* Ignore forward to self rules generated
+ * by adding both mlx5 devs to the flow table
+ * block on a normal nft offload setup.
+ */
+ return false;
+ }
+
+ if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't support more output ports, can't offload forwarding");
+ netdev_warn(priv->netdev,
+ "can't support more than %d output ports, can't offload forwarding\n",
+ esw_attr->out_count);
+ return false;
+ }
+
+ if (parse_state->encap ||
+ netdev_port_same_parent_id(priv->netdev, out_dev) ||
+ netif_is_ovs_master(out_dev))
+ return true;
+
+ if (parse_attr->filter_dev != priv->netdev) {
+		/* All mlx5 devices are called to configure
+		 * high level device filters. Therefore, the
+		 * *attempt* to install a filter on an invalid
+		 * eswitch should not trigger an explicit error.
+		 */
+ return false;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding");
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ netdev_name(priv->netdev),
+ out_dev->name);
+
+ return false;
+}
+
+static int
+parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+
+ parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex;
+ parse_attr->tun_info[esw_attr->out_count] =
+ mlx5e_dup_tun_info(parse_state->tun_info);
+
+ if (!parse_attr->tun_info[esw_attr->out_count])
+ return -ENOMEM;
+
+ parse_state->encap = false;
+ esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
+ esw_attr->out_count++;
+ /* attr->dests[].rep is resolved when we handle encap */
+
+ return 0;
+}
+
+static int
+parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct net_device *out_dev = act->dev;
+ struct net_device *uplink_dev;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch *esw;
+ int *ifindexes;
+ int if_count;
+ int err;
+
+ esw = priv->mdev->priv.eswitch;
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ ifindexes = parse_state->ifindexes;
+ if_count = parse_state->if_count;
+
+ if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack))
+ return -EOPNOTSUPP;
+
+ parse_state->ifindexes[if_count] = out_dev->ifindex;
+ parse_state->if_count++;
+
+ out_dev = get_fdb_out_dev(uplink_dev, out_dev);
+ if (!out_dev)
+ return -ENODEV;
+
+ if (is_vlan_dev(out_dev)) {
+ err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack);
+ if (err)
+ return err;
+ }
+
+ if (is_vlan_dev(parse_attr->filter_dev)) {
+ err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack);
+ if (err)
+ return err;
+ }
+
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
+ err = verify_uplink_forwarding(priv, attr, out_dev, extack);
+ if (err)
+ return err;
+
+ if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ return -EOPNOTSUPP;
+ }
+
+ if (same_vf_reps(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself");
+ return -EOPNOTSUPP;
+ }
+
+ out_priv = netdev_priv(out_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+ esw_attr->out_count++;
+
+ return 0;
+}
+
+static int
+parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+ int err;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &attr->action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+ return 0;
+}
+
+static int
+tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct net_device *out_dev = act->dev;
+ int err = -EOPNOTSUPP;
+
+ if (parse_state->encap)
+ err = parse_mirred_encap(parse_state, act, attr);
+ else if (netdev_port_same_parent_id(priv->netdev, out_dev))
+ err = parse_mirred(parse_state, act, priv, attr);
+ else if (netif_is_ovs_master(out_dev))
+ err = parse_mirred_ovs_master(parse_state, act, priv, attr);
+
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mirred = {
+ .can_offload = tc_act_can_offload_mirred,
+ .parse_action = tc_act_parse_mirred,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
new file mode 100644
index 000000000000..2c74567b6d25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct net_device *out_dev = act->dev;
+ struct mlx5e_priv *priv = flow->priv;
+
+ if (act->id != FLOW_ACTION_REDIRECT)
+ return false;
+
+ if (priv->netdev->netdev_ops != out_dev->netdev_ops ||
+ !mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ netdev_name(priv->netdev),
+ out_dev->name);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
+ flow_flag_set(parse_state->flow, HAIRPIN);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
+ .can_offload = tc_act_can_offload_mirred_nic,
+ .parse_action = tc_act_parse_mirred_nic,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
new file mode 100644
index 000000000000..784fc4f68b1e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <net/bareudp.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_priv *priv = parse_state->flow->priv;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) ||
+ act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->mpls_push = true;
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct net_device *filter_dev;
+
+ filter_dev = flow->attr->parse_attr->filter_dev;
+
+ /* we only support mpls pop if it is the first action
+ * and the filter net device is bareudp. Subsequent
+ * actions can be pedit and the last can be mirred
+ * egress redirect.
+ */
+ if (act_index) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+ return false;
+ }
+
+ if (!netif_is_bareudp(filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mpls_push = {
+ .can_offload = tc_act_can_offload_mpls_push,
+ .parse_action = tc_act_parse_mpls_push,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = {
+ .can_offload = tc_act_can_offload_mpls_pop,
+ .parse_action = tc_act_parse_mpls_pop,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
new file mode 100644
index 000000000000..79addbbef087
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "pedit.h"
+#include "en/tc_priv.h"
+#include "en/mod_hdr.h"
+
+static int pedit_header_offsets[] = {
+ [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int
+set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ u32 *curr_pmask, *curr_pval;
+
+ curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
+
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+		NL_SET_ERR_MSG_MOD(extack,
+				   "curr_pmask and new mask are the same. Acting twice on the same location");
+ goto out_err;
+ }
+
+ *curr_pmask |= mask;
+ *curr_pval |= (val & mask);
+
+ return 0;
+
+out_err:
+ return -EOPNOTSUPP;
+}
+
+static int
+parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ u8 htype = act->mangle.htype;
+ int err = -EOPNOTSUPP;
+ u32 mask, val, offset;
+
+ if (htype == FLOW_ACT_MANGLE_UNSPEC) {
+ NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
+ goto out_err;
+ }
+
+ if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
+ NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported");
+ goto out_err;
+ }
+
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
+ if (err)
+ goto out_err;
+
+ hdrs[cmd].pedits++;
+
+ return 0;
+out_err:
+ return err;
+}
+
+static int
+parse_pedit_to_reformat(const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask, val, offset;
+ u32 *p;
+
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
+ return -EOPNOTSUPP;
+ }
+
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+ p = (u32 *)&parse_attr->eth;
+ *(p + (offset >> 2)) |= (val & mask);
+
+ return 0;
+}
+
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
+ return parse_pedit_to_reformat(act, parse_attr, extack);
+
+ return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
+}
+
+static bool
+tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+
+ err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
+ attr->parse_attr, parse_state->hdrs,
+ flow, parse_state->extack);
+ if (err)
+ return err;
+
+ if (flow_flag_test(flow, L3_TO_L2_DECAP))
+ goto out;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ esw_attr->split_count = esw_attr->out_count;
+
+out:
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_pedit = {
+ .can_offload = tc_act_can_offload_pedit,
+ .parse_action = tc_act_parse_pedit,
+};
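
set_pedit_val() accumulates every mangle of a flow into one mask/value pair
per header type and rejects a second write that touches already-claimed bits
(note the caller passes ~act->mangle.mask, since in flow_offload the mangle
mask marks the bits to preserve). A standalone sketch of that accumulation
rule, with illustrative names and no kernel types:

	#include <stdint.h>
	#include <stdio.h>

	struct pedit_word {
		uint32_t mask;	/* bits already claimed by earlier mangles */
		uint32_t val;	/* values to write into the claimed bits */
	};

	static int pedit_accumulate(struct pedit_word *w, uint32_t mask, uint32_t val)
	{
		if (w->mask & mask)	/* disallow acting twice on the same bits */
			return -1;

		w->mask |= mask;
		w->val |= (val & mask);
		return 0;
	}

	int main(void)
	{
		struct pedit_word w = { 0, 0 };

		printf("%d\n", pedit_accumulate(&w, 0x0000ffff, 0x1234));   /* 0: ok */
		printf("%d\n", pedit_accumulate(&w, 0x00ff0000, 0xab0000)); /* 0: disjoint */
		printf("%d\n", pedit_accumulate(&w, 0x000000ff, 0x56));     /* -1: overlap */
		return 0;
	}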
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
new file mode 100644
index 000000000000..da8ab03af58f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_PEDIT_H__
+#define __MLX5_EN_TC_ACT_PEDIT_H__
+
+#include "en_tc.h"
+
+struct pedit_headers {
+ struct ethhdr eth;
+ struct vlan_hdr vlan;
+ struct iphdr ip4;
+ struct ipv6hdr ip6;
+ struct tcphdr tcp;
+ struct udphdr udp;
+};
+
+struct pedit_headers_action {
+ struct pedit_headers vals;
+ struct pedit_headers masks;
+ u32 pedits;
+};
+
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
new file mode 100644
index 000000000000..0819110193dc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (act->ptype != PACKET_HOST) {
+ NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host");
+ return -EOPNOTSUPP;
+ }
+
+ parse_state->ptype_host = true;
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_ptype = {
+ .can_offload = tc_act_can_offload_ptype,
+ .parse_action = tc_act_parse_ptype,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
new file mode 100644
index 000000000000..1c32e24e528d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct net_device *out_dev = act->dev;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ parse_attr = flow->attr->parse_attr;
+ esw_attr = flow->attr->esw_attr;
+
+ if (!out_dev)
+ return false;
+
+ if (!netif_is_ovs_master(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is supported only for OVS internal ports");
+ return false;
+ }
+
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is not supported from internal port");
+ return false;
+ }
+
+ if (!parse_state->ptype_host) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress requires ptype=host action");
+ return false;
+ }
+
+ if (esw_attr->out_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress is supported only as single destination");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+ int err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS,
+ &attr->action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = {
+ .can_offload = tc_act_can_offload_redirect_ingress,
+ .parse_action = tc_act_parse_redirect_ingress,
+};
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
new file mode 100644
index 000000000000..6699bdf5cf01
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <net/psample.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (flow_flag_test(parse_state->flow, CT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sample action with connection tracking is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_sample_attr *sample_attr;
+
+ sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
+ if (!sample_attr)
+ return -ENOMEM;
+
+ sample_attr->rate = act->sample.rate;
+ sample_attr->group_num = act->sample.psample_group->group_num;
+
+ if (act->sample.truncate)
+ sample_attr->trunc_size = act->sample.trunc_size;
+
+ attr->sample_attr = sample_attr;
+ flow_flag_set(parse_state->flow, SAMPLE);
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_sample = {
+ .can_offload = tc_act_can_offload_sample,
+ .parse_action = tc_act_parse_sample,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
new file mode 100644
index 000000000000..046b64c2cec4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (parse_state->num_actions != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_trap = {
+ .can_offload = tc_act_can_offload_trap,
+ .parse_action = tc_act_parse_trap,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
new file mode 100644
index 000000000000..6f4a2cf46afd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_tun_encap.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ if (!act->tunnel) {
+		NL_SET_ERR_MSG_MOD(parse_state->extack,
+				   "Zero tunnel attributes are not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->tun_info = act->tunnel;
+ parse_state->encap = true;
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->decap = true;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
+ .can_offload = tc_act_can_offload_tun_encap,
+ .parse_action = tc_act_parse_tun_encap,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
+ .can_offload = tc_act_can_offload_tun_decap,
+ .parse_action = tc_act_parse_tun_decap,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
new file mode 100644
index 000000000000..70fc0c2d8813
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_priv.h"
+
+static int
+add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry prio_tag_act = {
+ .vlan.vid = 0,
+ .vlan.prio =
+ MLX5_GET(fte_match_set_lyr_2_4,
+ mlx5e_get_match_headers_value(*action,
+ &parse_attr->spec),
+ first_prio) &
+ MLX5_GET(fte_match_set_lyr_2_4,
+ mlx5e_get_match_headers_criteria(*action,
+ &parse_attr->spec),
+ first_prio),
+ };
+
+ return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+ &prio_tag_act, parse_attr, hdrs, action,
+ extack);
+}
+
+static int
+parse_tc_vlan_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action,
+ struct netlink_ext_ack *extack)
+{
+ u8 vlan_idx = attr->total_vlan;
+
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
+ } else {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ attr->vlan_vid[vlan_idx] = act->vlan.vid;
+ attr->vlan_prio[vlan_idx] = act->vlan.prio;
+ attr->vlan_proto[vlan_idx] = act->vlan.proto;
+ if (!attr->vlan_proto[vlan_idx])
+ attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
+
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ } else {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
+ (act->vlan.proto != htons(ETH_P_8021Q) ||
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
+ return -EINVAL;
+ }
+
+ attr->total_vlan = vlan_idx + 1;
+
+ return 0;
+}
+
+int
+mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device **out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+ if (err)
+ return err;
+
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
+ if (is_vlan_dev(*out_dev))
+ err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
+
+ return err;
+}
+
+int
+mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+ int nest_level, err = 0;
+
+ nest_level = attr->parse_attr->filter_dev->lower_level -
+ priv->netdev->lower_level;
+ while (nest_level--) {
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static bool
+tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int err;
+
+ if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ /* Replace vlan pop+push with vlan modify */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
+ attr->parse_attr, parse_state->hdrs,
+ &attr->action, parse_state->extack);
+ } else {
+ err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
+ parse_state->extack);
+ }
+
+ if (err)
+ return err;
+
+ esw_attr->split_count = esw_attr->out_count;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct pedit_headers_action *hdrs = parse_state->hdrs;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+		/* For prio tag mode, replace the vlan pop with a vlan prio
+		 * tag rewrite.
+		 */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_vlan = {
+ .can_offload = tc_act_can_offload_vlan,
+ .parse_action = tc_act_parse_vlan,
+ .post_parse = tc_act_post_parse_vlan,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
new file mode 100644
index 000000000000..3d62f13ab61f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_VLAN_H__
+#define __MLX5_EN_TC_ACT_VLAN_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+struct pedit_headers_action;
+
+int
+mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device **out_dev,
+ struct netlink_ext_ack *extack);
+
+int
+mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack);
+
+int
+mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
new file mode 100644
index 000000000000..63e36e7f53e3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_priv.h"
+
+struct pedit_headers_action;
+
+int
+mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ u16 mask16 = VLAN_VID_MASK;
+ u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ const struct flow_action_entry pedit_act = {
+ .id = FLOW_ACTION_MANGLE,
+ .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ };
+ u8 match_prio_mask, match_prio_val;
+ void *headers_c, *headers_v;
+ int err;
+
+ headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec);
+ headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec);
+
+ if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match");
+ return -EOPNOTSUPP;
+ }
+
+ match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
+ NULL, extack);
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return err;
+}
+
+static bool
+tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ ns_type = mlx5e_get_flow_namespace(parse_state->flow);
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
+ attr->parse_attr, parse_state->hdrs,
+ &attr->action, parse_state->extack);
+ if (err)
+ return err;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ attr->esw_attr->split_count = attr->esw_attr->out_count;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
+ .can_offload = tc_act_can_offload_vlan_mangle,
+ .parse_action = tc_act_parse_vlan_mangle,
+};
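
The VID rewrite above is implemented as an Ethernet-header pedit on
h_vlan_TCI: only the 12 VID bits are written and PCP/DEI are preserved, which
is why the code also insists the flow matches on cvlan_tag and does not change
the priority. A simplified standalone sketch of the mask/value computation,
assuming VLAN_VID_MASK is 0x0fff and ignoring the byte-order handling done via
be16_to_cpu in the real code:

	#include <stdint.h>
	#include <stdio.h>

	#define VLAN_VID_MASK 0x0fff	/* VID bits of the 16-bit TCI */

	int main(void)
	{
		uint16_t vid = 100;
		uint32_t preserve = ~(uint32_t)VLAN_VID_MASK; /* mangle.mask: bits kept */
		uint32_t written = ~preserve;                 /* bits actually rewritten */
		uint32_t val = vid & VLAN_VID_MASK;           /* mangle.val */

		/* prints: written mask 0xfff val 0x64 */
		printf("written mask %#x val %#x\n", written, val & written);
		return 0;
	}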
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index b689701ac7d8..f832c26ff2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -5,11 +5,14 @@
#define __MLX5_EN_TC_PRIV_H__
#include "en_tc.h"
+#include "en/tc/act/act.h"
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
#define MLX5E_TC_MAX_SPLITS 1
+#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
+
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
@@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
struct ethhdr eth;
+ struct mlx5e_tc_act_parse_state parse_state;
};
/* Helper struct for accessing a struct containing list_head array.
@@ -115,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
+bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
+int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
+bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
@@ -176,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+
+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index fb5397324aa4..2db9573a3fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset =
(skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
+ if (inner_ip_hdr(skb)->version == 6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index a2a9f68579dd..15711814d2d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
return resp_list;
}
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
- struct mlx5e_tir_builder *builder;
- int err;
-
- builder = mlx5e_tir_builder_alloc(false);
- if (!builder)
- return -ENOMEM;
-
- mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
- mlx5e_tir_builder_build_direct(builder);
- mlx5e_tir_builder_build_tls(builder);
- err = mlx5e_tir_init(tir, builder, mdev, false);
-
- mlx5e_tir_builder_free(builder);
-
- return err;
-}
-
static void accel_rule_handle_work(struct work_struct *work)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int rxq, err;
- u32 rqtn;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
@@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
- rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
- err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+ err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
if (err)
goto err_create_tir;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 65571593ec5c..496977e7406e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2598,7 +2598,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
}
}
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 87c762066adb..6e0f88ea3701 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1086,6 +1086,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_sw),
+ &MLX5E_STATS_GRP(ipsec_hw),
+#endif
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e384f6458c06..7e05d7592bce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -544,13 +544,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 klm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+ u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct page *page = shampo->last_page;
u64 addr = shampo->last_addr;
struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
- int headroom;
+ int headroom, i;
headroom = rq->buff.headroom;
new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -602,9 +602,7 @@ update_klm:
err_unmap:
while (--i >= 0) {
- if (--index < 0)
- index = shampo->hd_per_wq - 1;
- dma_info = &shampo->info[index];
+ dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
mlx5e_page_release(rq, dma_info, true);
@@ -621,7 +619,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cb66c08783c2..eec919f1b476 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,10 +39,6 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
-#include <linux/if_macvlan.h>
-#include <net/tc_act/tc_pedit.h>
-#include <net/tc_act/tc_csum.h>
-#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
@@ -62,6 +58,7 @@
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
+#include "en/tc/act/act.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,8 +67,6 @@
#include "lag/lag.h"
#include "lag/mp.h"
-#define nic_chains(priv) ((priv)->fs.tc.chains)
-
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -399,7 +394,7 @@ bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
-static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, FT);
}
@@ -409,7 +404,7 @@ bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, OFFLOADED);
}
-static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ?
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
@@ -420,7 +415,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
+ return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&priv->fs.tc.mod_hdr;
}
@@ -433,7 +428,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
- get_flow_name_space(flow),
+ mlx5e_get_flow_namespace(flow),
&parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
@@ -937,7 +932,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
@@ -1091,7 +1086,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
mlx5_del_flow_rules(rule);
@@ -1123,7 +1118,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
+ mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
@@ -1304,8 +1299,6 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
mutex_unlock(&uplink_priv->unready_flows_lock);
}
-static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
-
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
struct mlx5_core_dev *out_mdev, *route_mdev;
@@ -1320,7 +1313,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_mdev->coredev_type != MLX5_COREDEV_VF)
return false;
- return same_hw_devs(out_priv, route_priv);
+ return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
@@ -1367,7 +1360,7 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
- get_flow_name_space(flow),
+ mlx5e_get_flow_namespace(flow),
mod_hdr_acts->num_actions,
mod_hdr_acts->actions);
if (IS_ERR(mod_hdr))
@@ -1441,7 +1434,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
metadata);
if (err)
- return err;
+ goto err_out;
}
}
@@ -1457,13 +1450,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule is only supported on chain 0");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
if (attr->dest_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule offload doesn't support goto action");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1471,8 +1466,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow_flag_test(flow, EGRESS) ?
MLX5E_TC_INT_PORT_EGRESS :
MLX5E_TC_INT_PORT_INGRESS);
- if (IS_ERR(int_port))
- return PTR_ERR(int_port);
+ if (IS_ERR(int_port)) {
+ err = PTR_ERR(int_port);
+ goto err_out;
+ }
esw_attr->int_port = int_port;
}
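The hunk above converts early returns in mlx5e_tc_add_fdb_flow() into `goto err_out` so every failure leaves through the function's common unwind path instead of skipping cleanup. A minimal standalone sketch of that single-exit pattern (the resources and names are invented, not the mlx5 ones):

#include <stdio.h>
#include <stdlib.h>

/* Single-exit error handling: every failure jumps to err_out so the
 * resources acquired so far are released exactly once.
 */
static int setup(void)
{
    int err;
    char *a = NULL, *b = NULL;

    a = malloc(32);
    if (!a) {
        err = -1;
        goto err_out;
    }

    b = malloc(64);
    if (!b) {
        err = -1;
        goto err_out;   /* 'a' is still released below */
    }

    return 0;

err_out:
    free(b);            /* free(NULL) is a no-op, so partial setup is safe */
    free(a);
    return err;
}

int main(void)
{
    printf("setup: %d\n", setup());
    return 0;
}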
@@ -2049,16 +2046,14 @@ static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
outer_headers);
}
-static void *get_match_headers_value(u32 flags,
- struct mlx5_flow_spec *spec)
+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_value(spec) :
get_match_outer_headers_value(spec);
}
-static void *get_match_headers_criteria(u32 flags,
- struct mlx5_flow_spec *spec)
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) :
@@ -2603,55 +2598,6 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
-struct pedit_headers {
- struct ethhdr eth;
- struct vlan_hdr vlan;
- struct iphdr ip4;
- struct ipv6hdr ip6;
- struct tcphdr tcp;
- struct udphdr udp;
-};
-
-struct pedit_headers_action {
- struct pedit_headers vals;
- struct pedit_headers masks;
- u32 pedits;
-};
-
-static int pedit_header_offsets[] = {
- [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
- [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
- [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
- [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
- [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
-};
-
-#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
-
-static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
-{
- u32 *curr_pmask, *curr_pval;
-
- curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
- curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
-
- if (*curr_pmask & mask) { /* disallow acting twice on the same location */
- NL_SET_ERR_MSG_MOD(extack,
- "curr_pmask and new mask same. Acting twice on same location");
- goto out_err;
- }
-
- *curr_pmask |= mask;
- *curr_pval |= (val & mask);
-
- return 0;
-
-out_err:
- return -EOPNOTSUPP;
-}
-
struct mlx5_fields {
u8 field;
u8 field_bsize;
@@ -2772,8 +2718,8 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
- headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
- headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
+ headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
+ headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
@@ -2888,88 +2834,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
-{
- u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
- int err = -EOPNOTSUPP;
- u32 mask, val, offset;
- u8 htype;
-
- htype = act->mangle.htype;
- err = -EOPNOTSUPP; /* can't be all optimistic */
-
- if (htype == FLOW_ACT_MANGLE_UNSPEC) {
- NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
- goto out_err;
- }
-
- if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
- NL_SET_ERR_MSG_MOD(extack,
- "The pedit offload action is not supported");
- goto out_err;
- }
-
- mask = act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
-
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
- if (err)
- goto out_err;
-
- hdrs[cmd].pedits++;
-
- return 0;
-out_err:
- return err;
-}
-
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct netlink_ext_ack *extack)
-{
- u32 mask, val, offset;
- u32 *p;
-
- if (act->id != FLOW_ACTION_MANGLE) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
- return -EOPNOTSUPP;
- }
-
- if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
- NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
- return -EOPNOTSUPP;
- }
-
- mask = ~act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
- p = (u32 *)&parse_attr->eth;
- *(p + (offset >> 2)) |= (val & mask);
-
- return 0;
-}
-
-static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(act, parse_attr, extack);
-
- return parse_pedit_to_modify_hdr(priv, act, namespace,
- parse_attr, hdrs, extack);
-}
-
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
@@ -3005,35 +2869,6 @@ out_dealloc_parsed_actions:
return err;
}
-static bool csum_offload_supported(struct mlx5e_priv *priv,
- u32 action,
- u32 update_flags,
- struct netlink_ext_ack *extack)
-{
- u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
- TCA_CSUM_UPDATE_FLAG_UDP;
-
- /* The HW recalcs checksums only if re-writing headers */
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC csum action is only offloaded with pedit");
- netdev_warn(priv->netdev,
- "TC csum action is only offloaded with pedit\n");
- return false;
- }
-
- if (update_flags & ~prot_flags) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload TC csum action for some header/s");
- netdev_warn(priv->netdev,
- "can't offload TC csum action for some header/s - flags %#x\n",
- update_flags);
- return false;
- }
-
- return true;
-}
-
struct ip_ttl_word {
__u8 ttl;
__u8 protocol;
@@ -3156,8 +2991,8 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
u8 ip_proto;
int i;
- headers_c = get_match_headers_criteria(actions, spec);
- headers_v = get_match_headers_value(actions, spec);
+ headers_c = mlx5e_get_match_headers_criteria(actions, spec);
+ headers_v = mlx5e_get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
@@ -3264,7 +3099,7 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv
return priv->mdev == peer_priv->mdev;
}
-static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
u64 fsystem_guid, psystem_guid;
@@ -3278,126 +3113,45 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
-static bool same_vf_reps(struct mlx5e_priv *priv,
- struct net_device *out_dev)
-{
- return mlx5e_eswitch_vf_rep(priv->netdev) &&
- priv->netdev == out_dev;
-}
-
-static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
- const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action, struct netlink_ext_ack *extack)
-{
- u16 mask16 = VLAN_VID_MASK;
- u16 val16 = act->vlan.vid & VLAN_VID_MASK;
- const struct flow_action_entry pedit_act = {
- .id = FLOW_ACTION_MANGLE,
- .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
- .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
- .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
- .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
- };
- u8 match_prio_mask, match_prio_val;
- void *headers_c, *headers_v;
- int err;
-
- headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
- headers_v = get_match_headers_value(*action, &parse_attr->spec);
-
- if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN rewrite action must have VLAN protocol match");
- return -EOPNOTSUPP;
- }
-
- match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
- match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
- if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Changing VLAN prio is not supported");
- return -EOPNOTSUPP;
- }
-
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
- *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- return err;
-}
-
static int
-add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action, struct netlink_ext_ack *extack)
-{
- const struct flow_action_entry prio_tag_act = {
- .vlan.vid = 0,
- .vlan.prio =
- MLX5_GET(fte_match_set_lyr_2_4,
- get_match_headers_value(*action,
- &parse_attr->spec),
- first_prio) &
- MLX5_GET(fte_match_set_lyr_2_4,
- get_match_headers_criteria(*action,
- &parse_attr->spec),
- first_prio),
- };
-
- return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- &prio_tag_act, parse_attr, hdrs, action,
- extack);
-}
-
-static int validate_goto_chain(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- const struct flow_action_entry *act,
- u32 actions,
- struct netlink_ext_ack *extack)
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
{
- bool is_esw = mlx5e_is_eswitch_flow(flow);
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5_flow_attr *attr = flow->attr;
- bool ft_flow = mlx5e_is_ft_flow(flow);
- u32 dest_chain = act->chain_index;
- struct mlx5_fs_chains *chains;
- struct mlx5_eswitch *esw;
- u32 reformat_and_fwd;
- u32 max_chain;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ const struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
- esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : nic_chains(priv);
- max_chain = mlx5_chains_get_chain_range(chains);
- reformat_and_fwd = is_esw ?
- MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
- MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
-
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
+ ns_type = mlx5e_get_flow_namespace(flow);
- if (!mlx5_chains_backwards_supported(chains) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto lower numbered chain isn't supported");
- return -EOPNOTSUPP;
- }
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ return -EOPNOTSUPP;
+ }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
+ if (!tc_act->can_offload(parse_state, act, i))
+ return -EOPNOTSUPP;
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ return err;
}
- if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- !reformat_and_fwd) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto chain is not allowed if action has reformat or decap");
- return -EOPNOTSUPP;
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse ||
+ !tc_act->can_offload(parse_state, act, i))
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ return err;
}
return 0;
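The new parse_tc_actions() replaces the long per-namespace switch statements with a table-driven dispatcher: every flow action id resolves to a handler object, and parsing happens in two passes (can_offload + parse_action for each entry, then the optional post_parse hooks). A standalone sketch of that dispatch shape, with invented handler names rather than the real mlx5e_tc_act ops:

#include <stdio.h>

struct act_ops {
    int (*can_offload)(int idx);
    int (*parse)(int idx);
    int (*post_parse)(void);        /* optional second pass */
};

static int accept_can(int idx)   { (void)idx; return 1; }
static int accept_parse(int idx) { printf("parse accept @%d\n", idx); return 0; }
static int drop_can(int idx)     { (void)idx; return 1; }
static int drop_parse(int idx)   { printf("parse drop @%d\n", idx); return 0; }
static int drop_post(void)       { printf("post-parse drop\n"); return 0; }

enum { ACT_ACCEPT, ACT_DROP, ACT_MAX };

static const struct act_ops accept_ops = { accept_can, accept_parse, NULL };
static const struct act_ops drop_ops   = { drop_can, drop_parse, drop_post };
static const struct act_ops *ops[ACT_MAX] = {
    [ACT_ACCEPT] = &accept_ops,
    [ACT_DROP]   = &drop_ops,
};

static int parse_actions(const int *acts, int n)
{
    int i, err;

    for (i = 0; i < n; i++) {       /* pass 1: can_offload + parse */
        const struct act_ops *op = ops[acts[i]];

        if (!op || !op->can_offload(i))
            return -1;              /* stands in for -EOPNOTSUPP */
        err = op->parse(i);
        if (err)
            return err;
    }
    for (i = 0; i < n; i++) {       /* pass 2: optional post_parse */
        const struct act_ops *op = ops[acts[i]];

        if (!op->post_parse)
            continue;
        err = op->post_parse();
        if (err)
            return err;
    }
    return 0;
}

int main(void)
{
    int acts[] = { ACT_ACCEPT, ACT_DROP };

    return parse_actions(acts, 2);
}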
@@ -3418,7 +3172,7 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
!hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
return 0;
- ns_type = get_flow_name_space(flow);
+ ns_type = mlx5e_get_flow_namespace(flow);
err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
&attr->action, extack);
@@ -3443,19 +3197,9 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
}
static int
-parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+flow_action_supported(struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
- struct pedit_headers_action hdrs[2] = {};
- const struct flow_action_entry *act;
- struct mlx5_nic_flow_attr *nic_attr;
- u32 action = 0;
- int err, i;
-
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
@@ -3467,108 +3211,35 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- nic_attr = attr->nic_attr;
- nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- parse_attr = attr->parse_attr;
-
- flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_ACCEPT:
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_MANGLE:
- case FLOW_ACTION_ADD:
- err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, NULL, extack);
- if (err)
- return err;
-
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- break;
- case FLOW_ACTION_VLAN_MANGLE:
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_KERNEL,
- act, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
-
- break;
- case FLOW_ACTION_CSUM:
- if (csum_offload_supported(priv, action,
- act->csum_flags,
- extack))
- break;
-
- return -EOPNOTSUPP;
- case FLOW_ACTION_REDIRECT: {
- struct net_device *peer_dev = act->dev;
-
- if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
- same_hw_devs(priv, netdev_priv(peer_dev))) {
- parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
- flow_flag_set(flow, HAIRPIN);
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "device is not on same HW, can't offload");
- netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
- peer_dev->name);
- return -EOPNOTSUPP;
- }
- }
- break;
- case FLOW_ACTION_MARK: {
- u32 mark = act->mark;
-
- if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
- NL_SET_ERR_MSG_MOD(extack,
- "Bad flow mark - only 16 bit is supported");
- return -EOPNOTSUPP;
- }
-
- nic_attr->flow_tag = mark;
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
- break;
- case FLOW_ACTION_GOTO:
- err = validate_goto_chain(priv, flow, act, action,
- extack);
- if (err)
- return err;
+ return 0;
+}
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = act->chain_index;
- break;
- case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
- &parse_attr->mod_hdr_acts,
- act, extack);
- if (err)
- return err;
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_tc_act_parse_state *parse_state;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct pedit_headers_action *hdrs;
+ int err;
- flow_flag_set(flow, CT);
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack,
- "The offload action is not supported in NIC action");
- return -EOPNOTSUPP;
- }
- }
+ err = flow_action_supported(flow_action, extack);
+ if (err)
+ return err;
- attr->action = action;
+ attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ parse_attr = attr->parse_attr;
+ parse_state = &parse_attr->parse_state;
+ mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
+ parse_state->ct_priv = get_ct_priv(priv);
+ hdrs = parse_state->hdrs;
- if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
+ err = parse_tc_actions(parse_state, flow_action);
+ if (err)
+ return err;
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
@@ -3590,147 +3261,7 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
mlx5e_eswitch_vf_rep(priv->netdev) &&
mlx5e_eswitch_vf_rep(peer_netdev) &&
- same_hw_devs(priv, peer_priv));
-}
-
-static int parse_tc_vlan_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
- struct mlx5_esw_flow_attr *attr,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- u8 vlan_idx = attr->total_vlan;
-
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
- NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
- return -EOPNOTSUPP;
- }
-
- switch (act->id) {
- case FLOW_ACTION_VLAN_POP:
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan pop action is not supported");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
- } else {
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- }
- break;
- case FLOW_ACTION_VLAN_PUSH:
- attr->vlan_vid[vlan_idx] = act->vlan.vid;
- attr->vlan_prio[vlan_idx] = act->vlan.prio;
- attr->vlan_proto[vlan_idx] = act->vlan.proto;
- if (!attr->vlan_proto[vlan_idx])
- attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
-
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported for vlan depth > 1");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- } else {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- }
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
- return -EINVAL;
- }
-
- attr->total_vlan = vlan_idx + 1;
-
- return 0;
-}
-
-static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
- struct net_device *out_dev)
-{
- struct net_device *fdb_out_dev = out_dev;
- struct net_device *uplink_upper;
-
- rcu_read_lock();
- uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
- if (uplink_upper && netif_is_lag_master(uplink_upper) &&
- uplink_upper == out_dev) {
- fdb_out_dev = uplink_dev;
- } else if (netif_is_lag_master(out_dev)) {
- fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
- if (fdb_out_dev &&
- (!mlx5e_eswitch_rep(fdb_out_dev) ||
- !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
- fdb_out_dev = NULL;
- }
- rcu_read_unlock();
- return fdb_out_dev;
-}
-
-static int add_vlan_push_action(struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr,
- struct net_device **out_dev,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- struct net_device *vlan_dev = *out_dev;
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_PUSH,
- .vlan.vid = vlan_dev_vlan_id(vlan_dev),
- .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
- .vlan.prio = 0,
- };
- int err;
-
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
- if (err)
- return err;
-
- rcu_read_lock();
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
- rcu_read_unlock();
- if (!*out_dev)
- return -ENODEV;
-
- if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action, extack);
-
- return err;
-}
-
-static int add_vlan_pop_action(struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_POP,
- };
- int nest_level, err = 0;
-
- nest_level = attr->parse_attr->filter_dev->lower_level -
- priv->netdev->lower_level;
- while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
- if (err)
- return err;
- }
-
- return err;
+ mlx5e_same_hw_devs(priv, peer_priv));
}
static bool same_hw_reps(struct mlx5e_priv *priv,
@@ -3742,7 +3273,7 @@ static bool same_hw_reps(struct mlx5e_priv *priv,
return mlx5e_eswitch_rep(priv->netdev) &&
mlx5e_eswitch_rep(peer_netdev) &&
- same_hw_devs(priv, peer_priv);
+ mlx5e_same_hw_devs(priv, peer_priv);
}
static bool is_lag_dev(struct mlx5e_priv *priv,
@@ -3766,66 +3297,6 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
same_port_devs(priv, netdev_priv(out_dev));
}
-static bool is_duplicated_output_device(struct net_device *dev,
- struct net_device *out_dev,
- int *ifindexes, int if_count,
- struct netlink_ext_ack *extack)
-{
- int i;
-
- for (i = 0; i < if_count; i++) {
- if (ifindexes[i] == out_dev->ifindex) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't duplicate output to same device");
- netdev_err(dev, "can't duplicate output to same device: %s\n",
- out_dev->name);
- return true;
- }
- }
-
- return false;
-}
-
-static int verify_uplink_forwarding(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *out_dev,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rep_priv;
-
- /* Forwarding non encapsulated traffic between
- * uplink ports is allowed only if
- * termination_table_raw_traffic cap is set.
- *
- * Input vport was stored attr->in_rep.
- * In LAG case, *priv* is the private data of
- * uplink which may be not the input vport.
- */
- rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
-
- if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
- mlx5e_eswitch_uplink_rep(out_dev)))
- return 0;
-
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
- termination_table_raw_traffic)) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are both uplink, can't offload forwarding");
- pr_err("devices %s %s are both uplink, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
- return -EOPNOTSUPP;
- } else if (out_dev != rep_priv->netdev) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not the same uplink, can't offload forwarding");
- pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
int ifindex,
@@ -3865,386 +3336,33 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static int
+parse_tc_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- struct pedit_headers_action hdrs[2] = {};
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_sample_attr sample_attr = {};
- const struct ip_tunnel_info *info = NULL;
struct mlx5_flow_attr *attr = flow->attr;
- int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
- bool ft_flow = mlx5e_is_ft_flow(flow);
- const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
- bool encap = false, decap = false;
- u32 action = attr->action;
- int err, i, if_count = 0;
- bool ptype_host = false;
- bool mpls_push = false;
-
- if (!flow_action_has_entries(flow_action)) {
- NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
- return -EINVAL;
- }
+ struct pedit_headers_action *hdrs;
+ int err;
- if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
- NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
- return -EOPNOTSUPP;
- }
+ err = flow_action_supported(flow_action, extack);
+ if (err)
+ return err;
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
+ parse_state = &parse_attr->parse_state;
+ mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
+ parse_state->ct_priv = get_ct_priv(priv);
+ hdrs = parse_state->hdrs;
- flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_ACCEPT:
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
- break;
- case FLOW_ACTION_PTYPE:
- if (act->ptype != PACKET_HOST) {
- NL_SET_ERR_MSG_MOD(extack,
- "skbedit ptype is only supported with type host");
- return -EOPNOTSUPP;
- }
-
- ptype_host = true;
- break;
- case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_TRAP:
- if (!flow_offload_has_one_action(flow_action)) {
- NL_SET_ERR_MSG_MOD(extack,
- "action trap is supported as a sole action only");
- return -EOPNOTSUPP;
- }
- action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT);
- attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- break;
- case FLOW_ACTION_MPLS_PUSH:
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
- reformat_l2_to_l3_tunnel) ||
- act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls push is supported only for mpls_uc protocol");
- return -EOPNOTSUPP;
- }
- mpls_push = true;
- break;
- case FLOW_ACTION_MPLS_POP:
- /* we only support mpls pop if it is the first action
- * and the filter net device is bareudp. Subsequent
- * actions can be pedit and the last can be mirred
- * egress redirect.
- */
- if (i) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls pop supported only as first action");
- return -EOPNOTSUPP;
- }
- if (!netif_is_bareudp(parse_attr->filter_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls pop supported only on bareudp devices");
- return -EOPNOTSUPP;
- }
-
- parse_attr->eth.h_proto = act->mpls_pop.proto;
- action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_flag_set(flow, L3_TO_L2_DECAP);
- break;
- case FLOW_ACTION_MANGLE:
- case FLOW_ACTION_ADD:
- err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, flow, extack);
- if (err)
- return err;
-
- if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- esw_attr->split_count = esw_attr->out_count;
- }
- break;
- case FLOW_ACTION_CSUM:
- if (csum_offload_supported(priv, action,
- act->csum_flags, extack))
- break;
-
- return -EOPNOTSUPP;
- case FLOW_ACTION_REDIRECT_INGRESS: {
- struct net_device *out_dev;
-
- out_dev = act->dev;
- if (!out_dev)
- return -EOPNOTSUPP;
-
- if (!netif_is_ovs_master(out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to ingress is supported only for OVS internal ports");
- return -EOPNOTSUPP;
- }
-
- if (netif_is_ovs_master(parse_attr->filter_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to ingress is not supported from internal port");
- return -EOPNOTSUPP;
- }
-
- if (!ptype_host) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to int port ingress requires ptype=host action");
- return -EOPNOTSUPP;
- }
-
- if (esw_attr->out_count) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to int port ingress is supported only as single destination");
- return -EOPNOTSUPP;
- }
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
-
- err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
- MLX5E_TC_INT_PORT_INGRESS,
- &action, esw_attr->out_count);
- if (err)
- return err;
-
- esw_attr->out_count++;
-
- break;
- }
- case FLOW_ACTION_REDIRECT:
- case FLOW_ACTION_MIRRED: {
- struct mlx5e_priv *out_priv;
- struct net_device *out_dev;
-
- out_dev = act->dev;
- if (!out_dev) {
- /* out_dev is NULL when filters with
- * non-existing mirred device are replayed to
- * the driver.
- */
- return -EINVAL;
- }
-
- if (mpls_push && !netif_is_bareudp(out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls is supported only through a bareudp device");
- return -EOPNOTSUPP;
- }
-
- if (ft_flow && out_dev == priv->netdev) {
- /* Ignore forward to self rules generated
- * by adding both mlx5 devs to the flow table
- * block on a normal nft offload setup.
- */
- return -EOPNOTSUPP;
- }
-
- if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't support more output ports, can't offload forwarding");
- netdev_warn(priv->netdev,
- "can't support more than %d output ports, can't offload forwarding\n",
- esw_attr->out_count);
- return -EOPNOTSUPP;
- }
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (encap) {
- parse_attr->mirred_ifindex[esw_attr->out_count] =
- out_dev->ifindex;
- parse_attr->tun_info[esw_attr->out_count] =
- mlx5e_dup_tun_info(info);
- if (!parse_attr->tun_info[esw_attr->out_count])
- return -ENOMEM;
- encap = false;
- esw_attr->dests[esw_attr->out_count].flags |=
- MLX5_ESW_DEST_ENCAP;
- esw_attr->out_count++;
- /* attr->dests[].rep is resolved when we
- * handle encap
- */
- } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
-
- if (is_duplicated_output_device(priv->netdev,
- out_dev,
- ifindexes,
- if_count,
- extack))
- return -EOPNOTSUPP;
-
- ifindexes[if_count] = out_dev->ifindex;
- if_count++;
-
- out_dev = get_fdb_out_dev(uplink_dev, out_dev);
- if (!out_dev)
- return -ENODEV;
-
- if (is_vlan_dev(out_dev)) {
- err = add_vlan_push_action(priv, attr,
- &out_dev,
- &action, extack);
- if (err)
- return err;
- }
-
- if (is_vlan_dev(parse_attr->filter_dev)) {
- err = add_vlan_pop_action(priv, attr,
- &action, extack);
- if (err)
- return err;
- }
-
- if (netif_is_macvlan(out_dev))
- out_dev = macvlan_dev_real_dev(out_dev);
-
- err = verify_uplink_forwarding(priv, flow, out_dev, extack);
- if (err)
- return err;
-
- if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not on same switch HW, can't offload forwarding");
- return -EOPNOTSUPP;
- }
-
- if (same_vf_reps(priv, out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't forward from a VF to itself");
- return -EOPNOTSUPP;
- }
-
- out_priv = netdev_priv(out_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
- esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
- esw_attr->out_count++;
- } else if (netif_is_ovs_master(out_dev)) {
- err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
- out_dev->ifindex,
- MLX5E_TC_INT_PORT_EGRESS,
- &action,
- esw_attr->out_count);
- if (err)
- return err;
-
- esw_attr->out_count++;
- } else if (parse_attr->filter_dev != priv->netdev) {
- /* All mlx5 devices are called to configure
- * high level device filters. Therefore, the
- * *attempt* to install a filter on invalid
- * eswitch should not trigger an explicit error
- */
- return -EINVAL;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not on same switch HW, can't offload forwarding");
- netdev_warn(priv->netdev,
- "devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name,
- out_dev->name);
- return -EOPNOTSUPP;
- }
- }
- break;
- case FLOW_ACTION_TUNNEL_ENCAP:
- info = act->tunnel;
- if (info) {
- encap = true;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "Zero tunnel attributes is not supported");
- return -EOPNOTSUPP;
- }
-
- break;
- case FLOW_ACTION_VLAN_PUSH:
- case FLOW_ACTION_VLAN_POP:
- if (act->id == FLOW_ACTION_VLAN_PUSH &&
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
- /* Replace vlan pop+push with vlan modify */
- action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_FDB,
- act, parse_attr, hdrs,
- &action, extack);
- } else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
- }
- if (err)
- return err;
-
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_VLAN_MANGLE:
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_FDB,
- act, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
-
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_TUNNEL_DECAP:
- decap = true;
- break;
- case FLOW_ACTION_GOTO:
- err = validate_goto_chain(priv, flow, act, action,
- extack);
- if (err)
- return err;
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = act->chain_index;
- break;
- case FLOW_ACTION_CT:
- if (flow_flag_test(flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
- return -EOPNOTSUPP;
- }
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
- &parse_attr->mod_hdr_acts,
- act, extack);
- if (err)
- return err;
-
- flow_flag_set(flow, CT);
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_SAMPLE:
- if (flow_flag_test(flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
- return -EOPNOTSUPP;
- }
- sample_attr.rate = act->sample.rate;
- sample_attr.group_num = act->sample.psample_group->group_num;
- if (act->sample.truncate)
- sample_attr.trunc_size = act->sample.trunc_size;
- flow_flag_set(flow, SAMPLE);
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack,
- "The offload action is not supported in FDB action");
- return -EOPNOTSUPP;
- }
- }
+ err = parse_tc_actions(parse_state, flow_action);
+ if (err)
+ return err;
/* Forward to/from internal port can only have 1 dest */
if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
@@ -4254,23 +3372,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- /* always set IP version for indirect table handling */
- attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
- if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
- action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
- /* For prio tag mode, replace vlan pop with rewrite vlan prio
- * tag rewrite.
- */
- action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
- }
-
- attr->action = action;
-
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
return err;
@@ -4278,30 +3379,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- if (attr->dest_chain && decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
-
- NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
- netdev_warn(priv->netdev, "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- /* Allocate sample attribute only when there is a sample action and
- * no errors after parsing.
- */
- if (flow_flag_test(flow, SAMPLE)) {
- attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
- if (!attr->sample_attr)
- return -ENOMEM;
- *attr->sample_attr = sample_attr;
- }
-
return 0;
}
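With this refactor parse_tc_nic_actions() and parse_tc_fdb_actions() share the same skeleton: validate the flow_action up front via flow_action_supported(), initialize a common parse_state, run the generic dispatcher, then build the mod-hdr actions. A compact sketch of hoisting shared validation out of two entry points (helper names are illustrative only):

#include <stdio.h>

/* Common checks hoisted out of both entry points, mirroring
 * flow_action_supported(): an empty action list is invalid.
 */
static int actions_supported(int n_actions)
{
    if (n_actions == 0) {
        fprintf(stderr, "flow action has no entries\n");
        return -1;              /* -EINVAL analogue */
    }
    return 0;
}

static int parse_common(int n) { printf("parsed %d actions\n", n); return 0; }

static int parse_nic(int n)
{
    int err = actions_supported(n);
    return err ? err : parse_common(n);
}

static int parse_fdb(int n)
{
    int err = actions_supported(n);
    return err ? err : parse_common(n);
}

int main(void)
{
    parse_nic(2);
    return parse_fdb(0);        /* rejected by the shared check */
}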
@@ -4398,7 +3475,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->priv = priv;
- attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
+ attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
if (!attr)
goto err_free;
@@ -4493,6 +3570,9 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ /* always set IP version for indirect table handling */
+ flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
@@ -4997,7 +4077,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
u16 peer_vhca_id;
int bkt;
- if (!same_hw_devs(priv, peer_priv))
+ if (!mlx5e_same_hw_devs(priv, peer_priv))
return;
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index eb042f0f5a41..5ffae9b13066 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -151,7 +151,6 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
-bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index ff0a07a91992..11bbcd5f5b8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
/* If vports min rate divider is 0 but their group has bw_share configured, then
* need to set bw_share for vports to minimal value.
*/
- if (!group_level && !max_guarantee && group->bw_share)
+ if (!group_level && !max_guarantee && group && group->bw_share)
return 1;
return 0;
}
@@ -419,7 +419,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
return err;
/* Recalculate bw share weights of old and new groups */
- if (vport->qos.bw_share) {
+ if (vport->qos.bw_share || new_group->bw_share) {
esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
esw_qos_normalize_vports_min_rate(esw, new_group, extack);
}
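The first qos.c hunk adds a `group &&` guard before dereferencing `group->bw_share`; because C's && short-circuits left to right, placing the NULL test immediately before the dereference is enough. The second hunk widens the recalculation trigger to fire when either the vport or the new group already has bw_share configured. A tiny standalone sketch of the short-circuit guard (invented types):

#include <stdio.h>
#include <stddef.h>

struct group { int bw_share; };

/* Mirrors the fixed condition: 'g && g->bw_share' never dereferences
 * a NULL pointer because && evaluates left to right and stops early.
 */
static int needs_min_bw_share(int group_level, int max_guarantee,
                              const struct group *g)
{
    return !group_level && !max_guarantee && g && g->bw_share;
}

int main(void)
{
    struct group g = { .bw_share = 1 };

    printf("%d\n", needs_min_bw_share(0, 0, &g));   /* 1 */
    printf("%d\n", needs_min_bw_share(0, 0, NULL)); /* 0, no crash */
    return 0;
}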
@@ -590,6 +590,7 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (IS_ERR(esw->qos.group0)) {
esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
PTR_ERR(esw->qos.group0));
+ err = PTR_ERR(esw->qos.group0);
goto err_group0;
}
}
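The esw_qos_create() hunk fixes a classic error-path bug: `err` was never assigned from PTR_ERR() before the `goto`, so the function could take its unwind path and still return 0 on failure. A minimal reproduction of the pattern, with hypothetical names:

#include <stdio.h>

static long create_group(int fail) { return fail ? -12 : 1; }  /* -ENOMEM analogue */

static int setup(int fail)
{
    int err = 0;
    long group0 = create_group(fail);

    if (group0 < 0) {
        fprintf(stderr, "create rate group 0 failed (%ld)\n", group0);
        err = (int)group0;      /* the fix: record the error before goto */
        goto err_group0;
    }
    return 0;

err_group0:
    /* unwind... without the assignment above this would return 0 */
    return err;
}

int main(void)
{
    printf("setup: %d\n", setup(1));
    return 0;
}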
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 98de91fc3d99..3bd5ae6ecd04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -329,14 +329,25 @@ static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ bool result = false;
int i;
- for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ /* An indirect table is supported only for flows whose in_port is the
+ * uplink and whose destinations are vports on the same eswitch as the
+ * uplink; return false if at least one destination doesn't meet these
+ * criteria.
+ */
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
if (esw_attr->dests[i].rep &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
- esw_attr->dests[i].mdev))
- return true;
- return false;
+ esw_attr->dests[i].mdev)) {
+ result = true;
+ } else {
+ result = false;
+ break;
+ }
+ }
+ return result;
}
static int
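esw_is_indir_table() now requires every split destination to qualify for the indirect table, where the old loop returned true as soon as any destination qualified; an empty destination range also yields false. A standalone sketch of that any-to-all predicate change (invented data, not the eswitch types):

#include <stdbool.h>
#include <stdio.h>

/* All-of semantics: true only if the range is non-empty and every
 * element qualifies; bail out on the first mismatch.
 */
static bool all_qualify(const bool *dests, int start, int end)
{
    bool result = false;
    int i;

    for (i = start; i < end; i++) {
        if (dests[i]) {
            result = true;
        } else {
            result = false;
            break;
        }
    }
    return result;
}

int main(void)
{
    bool a[] = { true, true, true };
    bool b[] = { true, false, true };

    printf("%d %d %d\n",
           all_qualify(a, 0, 3),    /* 1: all match    */
           all_qualify(b, 0, 3),    /* 0: one mismatch */
           all_qualify(a, 1, 1));   /* 0: empty range  */
    return 0;
}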
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_eswitch *esw = master->priv.eswitch;
struct mlx5_flow_table_attr ft_attr = {
.max_fte = 1, .prio = 0, .level = 0,
+ .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
};
struct mlx5_flow_namespace *egress_ns;
struct mlx5_flow_table *acl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 7e0e04cf26f8..b406e0367af6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -38,9 +38,10 @@
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 8
+#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
@@ -145,13 +146,15 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
- int num_counters_bulk = mlx5_core_is_sf(dev) ?
- MLX5_SF_NUM_COUNTERS_BULK :
- MLX5_SW_MAX_COUNTERS_BULK;
+ return min_t(int, MLX5_INIT_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- return min_t(int, num_counters_bulk,
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
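Both helpers clamp against the firmware capability `1 << log_max_flow_counter_bulk`: the query buffer now starts at 8 counters and may later grow toward the 32K software ceiling. For example, with log_max_flow_counter_bulk = 10 the device caps bulks at 1024, so init = min(8, 1024) = 8 and max = min(32768, 1024) = 1024. A short sketch of the two clamps:

#include <stdio.h>

#define SW_MAX_BULK (1 << 15)   /* 32K software ceiling */
#define INIT_BULK   8

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
    int log_max_flow_counter_bulk = 10;  /* sample HW capability */
    int hw_max = 1 << log_max_flow_counter_bulk;

    printf("init=%d max=%d\n",
           min_int(INIT_BULK, hw_max),      /* 8    */
           min_int(SW_MAX_BULK, hw_max));   /* 1024 */
    return 0;
}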
@@ -177,7 +180,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
bool query_more_counters = (first->id <= last_id);
- int max_bulk_len = get_max_bulk_query_len(dev);
+ int cur_bulk_len = fc_stats->bulk_query_len;
u32 *data = fc_stats->bulk_query_out;
struct mlx5_fc *counter = first;
u32 bulk_base_id;
@@ -189,7 +192,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
bulk_base_id = counter->id & ~0x3;
/* number of counters to query inc. the last counter */
- bulk_len = min_t(int, max_bulk_len,
+ bulk_len = min_t(int, cur_bulk_len,
ALIGN(last_id - bulk_base_id + 1, 4));
err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
@@ -230,6 +233,41 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
+static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ unsigned long now = jiffies;
+ u32 *bulk_query_out_tmp;
+ int max_out_len;
+
+ if (fc_stats->bulk_query_alloc_failed &&
+ time_before(now, fc_stats->next_bulk_query_alloc))
+ return;
+
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+ if (!bulk_query_out_tmp) {
+ mlx5_core_warn_once(dev,
+ "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
+ max_bulk_len);
+ fc_stats->bulk_query_alloc_failed = true;
+ fc_stats->next_bulk_query_alloc =
+ now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+ return;
+ }
+
+ kfree(fc_stats->bulk_query_out);
+ fc_stats->bulk_query_out = bulk_query_out_tmp;
+ fc_stats->bulk_query_len = max_bulk_len;
+ if (fc_stats->bulk_query_alloc_failed) {
+ mlx5_core_info(dev,
+ "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
+ max_bulk_len);
+ fc_stats->bulk_query_alloc_failed = false;
+ }
+}
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
@@ -247,15 +285,22 @@ static void mlx5_fc_stats_work(struct work_struct *work)
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
- llist_for_each_entry(counter, addlist, addlist)
+ llist_for_each_entry(counter, addlist, addlist) {
mlx5_fc_stats_insert(dev, counter);
+ fc_stats->num_counters++;
+ }
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
mlx5_fc_release(dev, counter);
+ fc_stats->num_counters--;
}
+ if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+ fc_stats->num_counters > get_init_bulk_query_len(dev))
+ mlx5_fc_stats_bulk_query_size_increase(dev);
+
if (time_before(now, fc_stats->next_query) ||
list_empty(&fc_stats->counters))
return;
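mlx5_fc_stats_bulk_query_size_increase() grows the query buffer on demand: the larger buffer is allocated off to the side and swapped in only on success, and an allocation failure arms a 180-second backoff so the periodic stats work doesn't retry on every tick. The work function above triggers the grow once the live counter count exceeds the initial bulk length. A standalone sketch of the allocate-aside-with-backoff pattern (plain malloc/time stand in for the kernel primitives):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define RETRY_PERIOD 180        /* seconds between failed grow attempts */

struct stats {
    char *buf;
    size_t len;
    int alloc_failed;
    time_t next_retry;
};

static void grow_buffer(struct stats *s, size_t new_len)
{
    char *tmp;

    /* Back off if the last attempt failed recently. */
    if (s->alloc_failed && time(NULL) < s->next_retry)
        return;

    tmp = calloc(1, new_len);
    if (!tmp) {
        s->alloc_failed = 1;
        s->next_retry = time(NULL) + RETRY_PERIOD;
        return;                 /* keep using the old buffer */
    }

    free(s->buf);               /* swap in only after success */
    s->buf = tmp;
    s->len = new_len;
    s->alloc_failed = 0;
}

int main(void)
{
    struct stats s = { .buf = calloc(1, 8), .len = 8 };

    grow_buffer(&s, 1024);
    printf("len=%zu\n", s.len);
    free(s.buf);
    return 0;
}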
@@ -378,8 +423,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int max_bulk_len;
- int max_out_len;
+ int init_bulk_len;
+ int init_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -387,11 +432,12 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
- max_bulk_len = get_max_bulk_query_len(dev);
- max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
- fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ init_bulk_len = get_init_bulk_query_len(dev);
+ init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
if (!fc_stats->bulk_query_out)
return -ENOMEM;
+ fc_stats->bulk_query_len = init_bulk_len;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 75121bc1eaa5..737df402c927 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -840,6 +840,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
+
+ if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -907,8 +910,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
- if (mlx5_core_is_pf(dev))
- queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index ea1efdecc88c..051b20ec7bdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -110,7 +110,7 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats s = { 0 };
+ struct rtnl_link_stats64 s = {};
int i, j;
for (i = 0; i < priv->stats_nch; i++) {
@@ -128,11 +128,17 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
s.tx_packets += sq_stats->packets;
s.tx_bytes += sq_stats->bytes;
- s.tx_queue_dropped += sq_stats->dropped;
+ s.tx_dropped += sq_stats->dropped;
}
}
- memcpy(&priv->stats.sw, &s, sizeof(s));
+ memset(&priv->stats.sw, 0, sizeof(s));
+
+ priv->stats.sw.rx_packets = s.rx_packets;
+ priv->stats.sw.rx_bytes = s.rx_bytes;
+ priv->stats.sw.tx_packets = s.tx_packets;
+ priv->stats.sw.tx_bytes = s.tx_bytes;
+ priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ad63dd45c8fb..a6592f9c3c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
if (port_sel->tunnel)
mlx5_destroy_ttc_table(port_sel->inner.ttc);
mlx5_lag_destroy_definers(ldev);
+ memset(port_sel, 0, sizeof(*port_sel));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index 0dd96a6b140d..c1df0d3595d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
{
int i;
- for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
tout_set(dev, tout_def_sw_val[i], i);
}
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
if (!dev->timeouts)
return -ENOMEM;
- tout_set_def_val(dev);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 31faa5c17aa9..1c42ead782fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a92a92a52346..d97c9e86d7b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- err = mlx5_tout_init(dev);
- if (err) {
- mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
- return err;
- }
+ mlx5_tout_set_def_val(dev);
/* wait for firmware to accept initialization segments configurations
*/
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
- goto err_tout_cleanup;
+ return err;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- goto err_tout_cleanup;
+ return err;
}
mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
mlx5_set_driver_version(dev);
- mlx5_start_health_poll(dev);
-
err = mlx5_query_hca_caps(dev);
if (err) {
mlx5_core_err(dev, "query hca failed\n");
- goto stop_health;
+ goto reclaim_boot_pages;
}
+ mlx5_start_health_poll(dev);
+
return 0;
-stop_health:
- mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
@@ -1094,8 +1088,6 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
- mlx5_tout_cleanup(dev);
return err;
}
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
- mlx5_tout_cleanup(dev);
return 0;
}
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ goto err_timeout_init;
+ }
+
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@@ -1501,6 +1498,8 @@ err_adev_init:
err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
+ mlx5_tout_cleanup(dev);
+err_timeout_init:
debugfs_remove(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
+ mlx5_tout_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
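Moving mlx5_tout_init()/mlx5_tout_cleanup() from function setup/teardown into mlx5_mdev_init()/mlx5_mdev_uninit() ties the timeout table to the device's whole lifetime; note how the unwind labels in init and the call order in uninit both run in reverse order of initialization. A sketch of that mirrored ladder (subsystem names are invented):

#include <stdio.h>

static int  tout_init(void)      { printf("tout init\n"); return 0; }
static void tout_cleanup(void)   { printf("tout cleanup\n"); }
static int  health_init(void)    { printf("health init\n"); return 0; }
static void health_cleanup(void) { printf("health cleanup\n"); }

static int mdev_init(void)
{
    int err;

    err = tout_init();
    if (err)
        goto err_timeout_init;
    err = health_init();
    if (err)
        goto err_health_init;
    return 0;

err_health_init:
    tout_cleanup();             /* unwind in reverse order of init */
err_timeout_init:
    return err;
}

static void mdev_uninit(void)
{
    health_cleanup();           /* uninit mirrors init, reversed */
    tout_cleanup();
}

int main(void)
{
    if (!mdev_init())
        mdev_uninit();
    return 0;
}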
@@ -1604,12 +1604,28 @@ static void remove_one(struct pci_dev *pdev)
mlx5_devlink_free(devlink);
}
+#define mlx5_pci_trace(dev, fmt, ...) ({ \
+ struct mlx5_core_dev *__dev = (dev); \
+ mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
+ __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
+ __dev->pci_status, ##__VA_ARGS__); \
+})
+
+static const char *result2str(enum pci_ers_result result)
+{
+ return result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
+ result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
+ result == PCI_ERS_RESULT_RECOVERED ? "recovered" :
+ "unknown";
+}
+
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ enum pci_ers_result res;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
@@ -1617,8 +1633,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
- return state == pci_channel_io_perm_failure ?
+ res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+
+ mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
+ return res;
}
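mlx5_pci_trace() uses a statement expression — the `({ ... })` construct, a GCC/Clang extension rather than standard C — so the macro can bind `dev` to a local `__dev` once and reference it several times without re-evaluating the argument, and the PCI error handlers now log both entry and exit with result2str() turning the pci_ers_result enum into readable text. A standalone sketch of the once-only-evaluation macro (the struct and fields are invented):

#include <stdio.h>

struct dev { int state; const char *name; };

/* Statement expression: evaluate 'd' exactly once, then use the local
 * __dev for every field access inside the macro body.
 */
#define dev_trace(d, fmt, ...) ({                                       \
    struct dev *__dev = (d);                                            \
    printf("%s: state=%d " fmt, __dev->name, __dev->state,              \
           ##__VA_ARGS__);                                              \
})

static const char *result2str(int res)
{
    return res == 0 ? "recovered" :
           res == 1 ? "need reset" : "disconnect";
}

int main(void)
{
    struct dev d = { .state = 3, .name = "mlx5" };

    dev_trace(&d, "exit, result = %s\n", result2str(0));
    return 0;
}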
/* wait for the device to show vital signs by waiting
@@ -1652,28 +1671,34 @@ static int wait_vital(struct pci_dev *pdev)
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
+ enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter\n");
err = mlx5_pci_enable_device(dev);
if (err) {
mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
__func__, err);
- return PCI_ERS_RESULT_DISCONNECT;
+ goto out;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (wait_vital(pdev)) {
- mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
- return PCI_ERS_RESULT_DISCONNECT;
+ err = wait_vital(pdev);
+ if (err) {
+ mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
+ __func__, err);
+ goto out;
}
- return PCI_ERS_RESULT_RECOVERED;
+ res = PCI_ERS_RESULT_RECOVERED;
+out:
+ mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+ return res;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1681,14 +1706,12 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter, loading driver..\n");
err = mlx5_load_one(dev);
- if (err)
- mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
- __func__, err);
- else
- mlx5_core_info(dev, "%s: device recovered\n", __func__);
+
+ mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
+ !err ? "recovered" : "Failed");
}
static const struct pci_error_handlers mlx5_err_handler = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 252d6017387d..17aa348989cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -247,7 +247,7 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
u16 max_ext_fn = 0;
- u16 ext_base_id;
+ u16 ext_base_id = 0;
u16 max_fn = 0;
u16 base_id;
int err;
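The zero-initialization above is presumably there because ext_base_id is only written when external SFs are supported, leaving a maybe-uninitialized path otherwise. The same pattern in a standalone sketch with hypothetical names:

#include <stdio.h>

static int query_ext_base(unsigned short *base)
{
	*base = 100;	/* pretend firmware returned a base ID */
	return 0;
}

int main(void)
{
	unsigned short ext_base_id = 0;	/* initialized: the query may be skipped */
	int have_ext = 0;

	if (have_ext)
		query_ext_base(&ext_base_id);
	printf("ext base: %hu\n", ext_base_id);	/* always defined */
	return 0;
}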
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 0d1f08bbf631..7e89d5980d5e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -47,7 +47,7 @@ static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
- u8 local_port;
+ u16 local_port;
};
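This u8 to u16 widening of local_port recurs throughout the rest of the patch; the motivation is that an 8-bit port number silently truncates anything above 255. A standalone demonstration:

#include <assert.h>

int main(void)
{
	unsigned char old_port = (unsigned char)300;	/* truncates: 300 % 256 */
	unsigned short new_port = 300;			/* preserved */

	assert(old_port == 44);
	assert(new_port == 300);
	return 0;
}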
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -77,7 +77,7 @@ struct mlxsw_core {
bool enable_string_tlv;
} emad;
struct {
- u8 *mapping; /* lag_id+port_index to local_port mapping */
+ u16 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
@@ -718,7 +718,7 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
}
/* called with rcu read lock held */
-static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
struct mlxsw_core *mlxsw_core = priv;
@@ -1959,7 +1959,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
- alloc_size = sizeof(u8) *
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
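Switching the allocation to sizeof(*mlxsw_core->lag.mapping) keeps the size in sync now that the element type changed from u8 to u16; the old sizeof(u8) would have silently under-allocated by half. A sketch of the idiom with hypothetical names:

#include <stdlib.h>

struct lag_table {
	unsigned short *mapping;	/* was unsigned char * before the widening */
};

static int lag_table_alloc(struct lag_table *t, size_t lags, size_t members)
{
	/* sizeof(*t->mapping) tracks the field's type automatically */
	t->mapping = calloc(lags * members, sizeof(*t->mapping));
	return t->mapping ? 0 : -1;
}

int main(void)
{
	struct lag_table t;
	int err = lag_table_alloc(&t, 64, 32);

	free(t.mapping);
	return err;
}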
@@ -2130,7 +2130,7 @@ int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
if (mlxsw_core->driver->ptp_transmitted)
mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
@@ -2208,7 +2208,7 @@ mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
rxl_item->enabled = enabled;
}
-static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
struct mlxsw_event_listener_item *event_listener_item = priv;
@@ -2641,7 +2641,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
{
struct mlxsw_rx_listener_item *rxl_item;
const struct mlxsw_rx_listener *rxl;
- u8 local_port;
+ u16 local_port;
bool found = false;
if (rx_info->is_lag) {
@@ -2699,7 +2699,7 @@ static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
}
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index, u8 local_port)
+ u16 lag_id, u8 port_index, u16 local_port)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
@@ -2708,8 +2708,8 @@ void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
-u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index)
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
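Assuming the usual flat layout behind mlxsw_core_lag_mapping_index(), the table holds max_members entries per LAG and is indexed as lag_id * max_members + port_index; the get/set/clear helpers above all go through that index. A standalone sketch:

#include <assert.h>

/* mapping[] is one row of max_members entries per LAG. */
static unsigned short lag_mapping_get(const unsigned short *mapping,
				      unsigned int max_members,
				      unsigned int lag_id,
				      unsigned int port_index)
{
	return mapping[lag_id * max_members + port_index];
}

int main(void)
{
	unsigned short mapping[2 * 4] = { 0 };

	mapping[1 * 4 + 2] = 300;	/* LAG 1, member 2 -> local port 300 */
	assert(lag_mapping_get(mapping, 4, 1, 2) == 300);
	return 0;
}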
@@ -2719,7 +2719,7 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 local_port)
+ u16 lag_id, u16 local_port)
{
int i;
@@ -2747,7 +2747,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
-static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
u32 port_number, bool split,
u32 split_port_subnumber,
@@ -2778,7 +2778,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
return err;
}
-static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
+static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2788,7 +2788,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
@@ -2810,7 +2810,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_init);
-void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
atomic_dec(&mlxsw_core->active_ports_count);
@@ -2845,7 +2845,7 @@ void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2857,7 +2857,7 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2869,7 +2869,7 @@ void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2882,7 +2882,7 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
EXPORT_SYMBOL(mlxsw_core_port_clear);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2895,7 +2895,7 @@ EXPORT_SYMBOL(mlxsw_core_port_type_get);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2905,7 +2905,7 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port)
+bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
{
const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
int i;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 12023a550007..f30bb8614e69 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -54,7 +54,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
- u8 local_port;
+ u16 local_port;
bool is_emad;
};
@@ -67,7 +67,7 @@ struct mlxsw_rx_md_info {
u16 tx_sys_port;
u16 tx_lag_id;
};
- u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
+ u16 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
u8 tx_tc;
u8 latency_valid:1,
tx_congestion_valid:1,
@@ -82,11 +82,11 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
struct mlxsw_rx_listener {
- void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
- u8 local_port;
+ void (*func)(struct sk_buff *skb, u16 local_port, void *priv);
+ u16 local_port;
u8 mirror_reason;
u16 trap_id;
};
@@ -209,7 +209,7 @@ struct mlxsw_rx_info {
u16 sys_port;
u16 lag_id;
} u;
- u8 lag_port_index;
+ u16 lag_port_index;
u8 mirror_reason;
int trap_id;
};
@@ -218,36 +218,36 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
struct mlxsw_rx_info *rx_info);
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index, u8 local_port);
-u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index);
+ u16 lag_id, u8 port_index, u16 local_port);
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 local_port);
+ u16 lag_id, u16 local_port);
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
u32 port_number, bool split, u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
-void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port);
int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
void *port_driver_priv,
const unsigned char *switch_id,
unsigned char switch_id_len);
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u8 local_port);
+ u16 local_port);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
- u8 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
+ u16 local_port);
+bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -316,11 +316,11 @@ struct mlxsw_driver {
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
- int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_type new_type);
- int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count, struct netlink_ext_ack *extack);
- int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
@@ -394,7 +394,7 @@ struct mlxsw_driver {
* is responsible for freeing the passed-in SKB.
*/
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 78d9c0196f2b..77e82e6cf6e8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -113,7 +113,7 @@ static const struct rhashtable_params mlxsw_afa_set_ht_params = {
};
struct mlxsw_afa_fwd_entry_ht_key {
- u8 local_port;
+ u16 local_port;
};
struct mlxsw_afa_fwd_entry {
@@ -555,7 +555,7 @@ int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
EXPORT_SYMBOL(mlxsw_afa_block_terminate);
static struct mlxsw_afa_fwd_entry *
-mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
{
struct mlxsw_afa_fwd_entry *fwd_entry;
int err;
@@ -598,7 +598,7 @@ static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
}
static struct mlxsw_afa_fwd_entry *
-mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
{
struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
struct mlxsw_afa_fwd_entry *fwd_entry;
@@ -647,7 +647,7 @@ mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_fwd_entry_ref *
-mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
+mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u16 local_port)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
struct mlxsw_afa_fwd_entry *fwd_entry;
@@ -1352,7 +1352,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
struct mlxsw_afa_mirror {
struct mlxsw_afa_resource resource;
int span_id;
- u8 local_in_port;
+ u16 local_in_port;
bool ingress;
};
@@ -1379,7 +1379,7 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u16 local_in_port,
const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
@@ -1423,7 +1423,7 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
}
int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u16 local_in_port,
const struct net_device *out_dev, bool ingress,
struct netlink_ext_ack *extack)
{
@@ -1663,7 +1663,7 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
}
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port,
+ u16 local_port, bool in_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
@@ -2038,7 +2038,7 @@ static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate)
struct mlxsw_afa_sampler {
struct mlxsw_afa_resource resource;
int span_id;
- u8 local_port;
+ u16 local_port;
bool ingress;
};
@@ -2061,7 +2061,7 @@ static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_sampler *
-mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port,
+mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group, u32 rate,
u32 trunc_size, bool truncate, bool ingress,
struct netlink_ext_ack *extack)
@@ -2104,7 +2104,7 @@ mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block,
return 0;
}
-int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b65bf98eb5ab..16cbd6acbb01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -17,24 +17,24 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
bool *activity);
- int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
+ int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u16 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
void (*counter_index_put)(void *priv, unsigned int counter_index);
- int (*mirror_add)(void *priv, u8 local_in_port,
+ int (*mirror_add)(void *priv, u16 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id);
- void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
+ void (*mirror_del)(void *priv, u16 local_in_port, int span_id,
bool ingress);
int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
void (*policer_del)(void *priv, u16 policer_index);
- int (*sampler_add)(void *priv, u8 local_port,
+ int (*sampler_add)(void *priv, u16 local_port,
struct psample_group *psample_group, u32 rate,
u32 trunc_size, bool truncate, bool ingress,
int *p_span_id, struct netlink_ext_ack *extack);
- void (*sampler_del)(void *priv, u8 local_port, int span_id,
+ void (*sampler_del)(void *priv, u16 local_port, int span_id,
bool ingress);
bool dummy_first_set;
};
@@ -62,12 +62,12 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port,
+ u16 local_in_port,
const struct net_device *out_dev,
bool ingress,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port,
+ u16 local_port, bool in_port,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et,
@@ -98,7 +98,7 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
u32 fa_index, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
-int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index ab70a873a01a..cfafbeb42586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -367,6 +367,42 @@ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
+#define LOCAL_PORT_LSB_SIZE 8
+#define LOCAL_PORT_MSB_SIZE 2
+
+#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \
+ .offset = _offset1, \
+ .shift = _shift1, \
+ .size = {.bits = LOCAL_PORT_LSB_SIZE,}, \
+ .name = #_type "_" #_cname "_local_port", \
+}; \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \
+ .offset = _offset2, \
+ .shift = _shift2, \
+ .size = {.bits = LOCAL_PORT_MSB_SIZE,}, \
+ .name = #_type "_" #_cname "_lp_msb", \
+}; \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \
+{ \
+ u32 local_port, lp_msb; \
+ \
+ local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \
+ local_port), 0); \
+ lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \
+ 0); \
+ return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \
+ val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \
+ val >> LOCAL_PORT_LSB_SIZE); \
+}
+
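The generated set() masks the low LOCAL_PORT_LSB_SIZE bits into the legacy 8-bit field and shifts the remainder into the new 2-bit lp_msb field; get() reassembles them. The arithmetic on its own:

#include <assert.h>

#define LSB_SIZE 8

int main(void)
{
	unsigned int local_port = 300;	/* needs more than 8 bits */
	unsigned int lsb = local_port & ((1 << LSB_SIZE) - 1);	/* 44 */
	unsigned int msb = local_port >> LSB_SIZE;		/* 1 */

	assert(((msb << LSB_SIZE) + lsb) == 300);	/* get() reverses set() */
	return 0;
}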
#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
_step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 5d4dfa5ddbb5..10d13f5f9c7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -38,7 +38,7 @@ struct mlxsw_m {
struct mlxsw_m_port {
struct net_device *dev;
struct mlxsw_m *mlxsw_m;
- u8 local_port;
+ u16 local_port;
u8 module;
};
@@ -180,7 +180,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
};
static int
-mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u8 local_port,
+mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *p_module, u8 *p_width)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -214,7 +214,7 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
}
static int
-mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
{
struct mlxsw_m_port *mlxsw_m_port;
struct net_device *dev;
@@ -277,7 +277,7 @@ err_alloc_etherdev:
return err;
}
-static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
+static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
{
struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port];
@@ -288,7 +288,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
mlxsw_core_port_fini(mlxsw_m->core, local_port);
}
-static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
+static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *last_module)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8d420eb8ade2..f748b537bdab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -69,52 +69,6 @@ MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN);
*/
MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
-/* SMID - Switch Multicast ID
- * --------------------------
- * The MID record maps from a MID (Multicast ID), which is a unique identifier
- * of the multicast group within the stacking domain, into a list of local
- * ports into which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x240
-
-MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN);
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
- u8 port, bool set)
-{
- MLXSW_REG_ZERO(smid, payload);
- mlxsw_reg_smid_swid_set(payload, 0);
- mlxsw_reg_smid_mid_set(payload, mid);
- mlxsw_reg_smid_port_set(payload, port, set);
- mlxsw_reg_smid_port_mask_set(payload, port, 1);
-}
-
/* SSPR - Switch System Port Record Register
* -----------------------------------------
* Configures the system port to local port mapping.
@@ -141,7 +95,7 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
*
* Access: RW
*/
-MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
/* reg_sspr_sub_port
* Virtual port within the physical port.
@@ -161,7 +115,7 @@ MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
*/
MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
-static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(sspr, payload);
mlxsw_reg_sspr_m_set(payload, 1);
@@ -407,7 +361,7 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid_vid,
enum mlxsw_reg_sfd_rec_action action,
- u8 local_port)
+ u16 local_port)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
@@ -417,15 +371,6 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
-static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
- char *mac, u16 *p_fid_vid,
- u8 *p_local_port)
-{
- mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
- *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
-}
-
/* reg_sfd_uc_lag_sub_port
* LAG sub port.
* Must be 0 if multichannel VEPA is not enabled.
@@ -478,15 +423,6 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
-static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index,
- char *mac, u16 *p_vid,
- u16 *p_lag_id)
-{
- mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index);
- *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index);
-}
-
/* reg_sfd_mc_pgi
*
* Multicast port group index - index into the port group table.
@@ -568,19 +504,43 @@ static inline void
mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid,
- enum mlxsw_reg_sfd_rec_action action, u32 uip,
+ enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
action);
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
- mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
- mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
}
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack4(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, policy, mac, fid,
+ action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4);
+}
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack6(char *payload, int rec_index, const char *mac,
+ u16 fid, enum mlxsw_reg_sfd_rec_action action,
+ u32 uip_ptr)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip_ptr);
+ /* Only static policy is supported for IPv6 unicast tunnel entry. */
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ mac, fid, action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6);
+}
+
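The IPv4 wrapper keeps the full 32-bit underlay address, split across the 8-bit uip_msb and 24-bit uip_lsb fields, while the IPv6 wrapper reuses the lsb field for a pointer (uip_ptr) to a KVDL entry holding the address. The IPv4 split, standalone:

#include <assert.h>

int main(void)
{
	unsigned int uip = 0xC0A80001;		/* 192.168.0.1 */
	unsigned int msb = uip >> 24;		/* 8-bit uip_msb field: 0xC0 */
	unsigned int lsb = uip & 0xFFFFFF;	/* 24-bit uip_lsb field */

	assert(((msb << 24) | lsb) == uip);	/* lossless round trip */
	return 0;
}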
enum mlxsw_reg_tunnel_port {
MLXSW_REG_TUNNEL_PORT_NVE,
MLXSW_REG_TUNNEL_PORT_VPLS,
@@ -692,7 +652,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
char *mac, u16 *p_vid,
- u8 *p_local_port)
+ u16 *p_local_port)
{
mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
*p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
@@ -781,7 +741,7 @@ MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spms, 0x00, 16, 0x00, 12);
enum mlxsw_reg_spms_state {
MLXSW_REG_SPMS_STATE_NO_CHANGE,
@@ -800,7 +760,7 @@ enum mlxsw_reg_spms_state {
*/
MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_spms_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(spms, payload);
mlxsw_reg_spms_local_port_set(payload, local_port);
@@ -833,7 +793,7 @@ MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1);
* When tport = 1: Tunnel port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvid, 0x00, 16, 0x00, 12);
/* reg_spvid_sub_port
* Virtual port within the physical port.
@@ -868,7 +828,7 @@ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
*/
MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
-static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid,
+static inline void mlxsw_reg_spvid_pack(char *payload, u16 local_port, u16 pvid,
u8 et_vlan)
{
MLXSW_REG_ZERO(spvid, payload);
@@ -911,7 +871,7 @@ MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvm, 0x00, 16, 0x00, 12);
/* reg_spvm_sub_port
* Virtual port within the physical port.
@@ -959,7 +919,7 @@ MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
MLXSW_REG_SPVM_BASE_LEN, 0, 12,
MLXSW_REG_SPVM_REC_LEN, 0, false);
-static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spvm_pack(char *payload, u16 local_port,
u16 vid_begin, u16 vid_end,
bool is_member, bool untagged)
{
@@ -994,7 +954,7 @@ MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN);
*
* Note: CPU port is not supported (all tag types are allowed).
*/
-MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spaft, 0x00, 16, 0x00, 12);
/* reg_spaft_sub_port
* Virtual port within the physical port.
@@ -1021,7 +981,7 @@ MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
-static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
bool allow_untagged)
{
MLXSW_REG_ZERO(spaft, payload);
@@ -1126,76 +1086,6 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
}
-/* SFTR - Switch Flooding Table Register
- * -------------------------------------
- * The switch flooding table is used for flooding packet replication. The table
- * defines a bit mask of ports for packet replication.
- */
-#define MLXSW_REG_SFTR_ID 0x2012
-#define MLXSW_REG_SFTR_LEN 0x420
-
-MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN);
-
-/* reg_sftr_swid
- * Switch partition ID with which to associate the port.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
-
-/* reg_sftr_flood_table
- * Flooding table index to associate with the specific type on the specific
- * switch partition.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
-
-/* reg_sftr_index
- * Index. Used as an index into the Flooding Table in case the table is
- * configured to use VID / FID or FID Offset.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
-
-/* reg_sftr_table_type
- * See mlxsw_flood_table_type
- * Access: RW
- */
-MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
-
-/* reg_sftr_range
- * Range of entries to update
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
-
-/* reg_sftr_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
-
-/* reg_sftr_cpu_port_mask
- * CPU port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_sftr_pack(char *payload,
- unsigned int flood_table,
- unsigned int index,
- enum mlxsw_flood_table_type table_type,
- unsigned int range, u8 port, bool set)
-{
- MLXSW_REG_ZERO(sftr, payload);
- mlxsw_reg_sftr_swid_set(payload, 0);
- mlxsw_reg_sftr_flood_table_set(payload, flood_table);
- mlxsw_reg_sftr_index_set(payload, index);
- mlxsw_reg_sftr_table_type_set(payload, table_type);
- mlxsw_reg_sftr_range_set(payload, range);
- mlxsw_reg_sftr_port_set(payload, port, set);
- mlxsw_reg_sftr_port_mask_set(payload, port, 1);
-}
-
/* SFDF - Switch Filtering DB Flush
* --------------------------------
* The switch filtering DB flush register is used to flush the FDB.
@@ -1347,7 +1237,7 @@ MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false);
static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(sldr, payload);
mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST);
@@ -1357,7 +1247,7 @@ static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
}
static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(sldr, payload);
mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST);
@@ -1397,7 +1287,7 @@ MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
* Reserved when pp = Global Configuration
* Access: Index
*/
-MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, slcr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_slcr_type {
MLXSW_REG_SLCR_TYPE_CRC, /* default */
@@ -1515,7 +1405,7 @@ MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, slcor, 0x00, 16, 0x00, 12);
/* reg_slcor_lag_id
* LAG Identifier. Index into the LAG descriptor table.
@@ -1531,7 +1421,7 @@ MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10);
MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
static inline void mlxsw_reg_slcor_pack(char *payload,
- u8 local_port, u16 lag_id,
+ u16 local_port, u16 lag_id,
enum mlxsw_reg_slcor_col col)
{
MLXSW_REG_ZERO(slcor, payload);
@@ -1541,7 +1431,7 @@ static inline void mlxsw_reg_slcor_pack(char *payload,
}
static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
- u8 local_port, u16 lag_id,
+ u16 local_port, u16 lag_id,
u8 port_index)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
@@ -1550,21 +1440,21 @@ static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
}
static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
}
static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
}
static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
@@ -1583,7 +1473,7 @@ MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spmlr, 0x00, 16, 0x00, 12);
/* reg_spmlr_sub_port
* Virtual port within the physical port.
@@ -1611,7 +1501,7 @@ enum mlxsw_reg_spmlr_learn_mode {
*/
MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
-static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
enum mlxsw_reg_spmlr_learn_mode mode)
{
MLXSW_REG_ZERO(spmlr, payload);
@@ -1642,7 +1532,7 @@ MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
*
* Note: Reserved for 802.1Q FIDs.
*/
-MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
@@ -1696,7 +1586,7 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
enum mlxsw_reg_svfa_mt mt, bool valid,
u16 fid, u16 vid)
{
@@ -1733,7 +1623,7 @@ MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1);
* When tport = 1: tunnel port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvtr, 0x00, 16, 0x00, 12);
/* reg_spvtr_ippe
* Ingress Port Prio Mode Update Enable.
@@ -1803,7 +1693,7 @@ enum mlxsw_reg_spvtr_epvid_mode {
MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4);
static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport,
- u8 local_port,
+ u16 local_port,
enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode)
{
MLXSW_REG_ZERO(spvtr, payload);
@@ -1828,7 +1718,7 @@ MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN);
*
* Note: CPU port is not supported (uses VLAN mode only).
*/
-MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, svpe, 0x00, 16, 0x00, 12);
/* reg_svpe_vp_en
* Virtual port enable.
@@ -1838,7 +1728,7 @@ MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
-static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
bool enable)
{
MLXSW_REG_ZERO(svpe, payload);
@@ -1948,7 +1838,7 @@ MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN);
*
* Note: CPU port is not supported.
*/
-MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvmlr, 0x00, 16, 0x00, 12);
/* reg_spvmlr_num_rec
* Number of records to update.
@@ -1971,7 +1861,7 @@ MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
{
@@ -2009,7 +1899,7 @@ MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN);
* through Rx port i and a Tx port j then port i and port j must have the
* same configuration.
*/
-MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvc, 0x00, 16, 0x00, 12);
/* reg_spvc_inner_et2
* Vlan Tag1 EtherType2 enable.
@@ -2074,7 +1964,7 @@ MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1);
*/
MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1);
-static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
+static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1,
bool et0)
{
MLXSW_REG_ZERO(spvc, payload);
@@ -2104,7 +1994,7 @@ MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN);
* Not supported to CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spevet, 0x00, 16, 0x00, 12);
/* reg_spevet_et_vlan
* Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet:
@@ -2115,7 +2005,7 @@ MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2);
-static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
u8 et_vlan)
{
MLXSW_REG_ZERO(spevet, payload);
@@ -2123,6 +2013,122 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
+/* SFTR-V2 - Switch Flooding Table Version 2 Register
+ * --------------------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR2_ID 0x202F
+#define MLXSW_REG_SFTR2_LEN 0x120
+
+MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN);
+
+/* reg_sftr2_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8);
+
+/* reg_sftr2_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6);
+
+/* reg_sftr2_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
+
+/* reg_sftr2_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
+
+/* reg_sftr2_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
+
+/* reg_sftr2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
+
+/* reg_sftr2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_sftr2_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range, u16 port, bool set)
+{
+ MLXSW_REG_ZERO(sftr2, payload);
+ mlxsw_reg_sftr2_swid_set(payload, 0);
+ mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr2_index_set(payload, index);
+ mlxsw_reg_sftr2_table_type_set(payload, table_type);
+ mlxsw_reg_sftr2_range_set(payload, range);
+ mlxsw_reg_sftr2_port_set(payload, port, set);
+ mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
+}
+
+/* SMID-V2 - Switch Multicast ID Version 2 Register
+ * ------------------------------------------------
+ * The MID record maps from a MID (Multicast ID), which is a unique identifier
+ * of the multicast group within the stacking domain, into a list of local
+ * ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID2_ID 0x2034
+#define MLXSW_REG_SMID2_LEN 0x120
+
+MLXSW_REG_DEFINE(smid2, MLXSW_REG_SMID2_ID, MLXSW_REG_SMID2_LEN);
+
+/* reg_smid2_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
+
+/* reg_smid2_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+
+/* reg_smid2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
+
+/* reg_smid2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
+ bool set)
+{
+ MLXSW_REG_ZERO(smid2, payload);
+ mlxsw_reg_smid2_swid_set(payload, 0);
+ mlxsw_reg_smid2_mid_set(payload, mid);
+ mlxsw_reg_smid2_port_set(payload, port, set);
+ mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+}
+
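Compared with the SMID and SFTR registers removed earlier in this patch, the V2 port bit arrays grow from 0x20 to 0x80 bytes, i.e. from 256 to 1024 one-bit-per-port entries, matching the widened local_port range:

#include <assert.h>

int main(void)
{
	assert(0x20 * 8 == 256);	/* SMID/SFTR: one bit per port */
	assert(0x80 * 8 == 1024);	/* SMID2/SFTR2: room for more than 256 */
	return 0;
}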
/* CWTP - Congestion WRED ECN TClass Profile
* ----------------------------------------
* Configures the profiles for queues of egress port and traffic class
@@ -2139,7 +2145,7 @@ MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtp, 0x00, 16, 0x00, 12);
/* reg_cwtp_traffic_class
* Traffic Class to configure
@@ -2173,7 +2179,7 @@ MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
#define MLXSW_REG_CWTP_MAX_PROFILE 2
#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
-static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtp_pack(char *payload, u16 local_port,
u8 traffic_class)
{
int i;
@@ -2217,7 +2223,7 @@ MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtpm, 0x00, 16, 0x00, 12);
/* reg_cwtpm_traffic_class
* Traffic Class to configure
@@ -2291,7 +2297,7 @@ MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
#define MLXSW_REG_CWTPM_RESET_PROFILE 0
-static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u16 local_port,
u8 traffic_class, u8 profile,
bool wred, bool ecn)
{
@@ -2363,7 +2369,7 @@ MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
* Local port. Not including CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppbt, 0x00, 16, 0x00, 12);
/* reg_ppbt_g
* group - When set, the binding is of an ACL group. When cleared,
@@ -2382,7 +2388,7 @@ MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
enum mlxsw_reg_pxbt_op op,
- u8 local_port, u16 acl_info)
+ u16 local_port, u16 acl_info)
{
MLXSW_REG_ZERO(ppbt, payload);
mlxsw_reg_ppbt_e_set(payload, e);
@@ -3513,7 +3519,7 @@ MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
*
* Note: CPU port is supported.
*/
-MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpts, 0x00, 16, 0x00, 12);
enum mlxsw_reg_qpts_trust_state {
MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
@@ -3526,7 +3532,7 @@ enum mlxsw_reg_qpts_trust_state {
*/
MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
-static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpts_pack(char *payload, u16 local_port,
enum mlxsw_reg_qpts_trust_state ts)
{
MLXSW_REG_ZERO(qpts, payload);
@@ -3717,7 +3723,7 @@ MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN);
*
* Note: CPU port is not supported.
*/
-MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtct, 0x00, 16, 0x00, 12);
/* reg_qtct_sub_port
* Virtual port within the physical port.
@@ -3742,7 +3748,7 @@ MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
-static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qtct_pack(char *payload, u16 local_port,
u8 switch_prio, u8 tclass)
{
MLXSW_REG_ZERO(qtct, payload);
@@ -3766,7 +3772,7 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
*
* Note: CPU port is supported.
*/
-MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qeec, 0x00, 16, 0x00, 12);
enum mlxsw_reg_qeec_hr {
MLXSW_REG_QEEC_HR_PORT,
@@ -3909,7 +3915,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
-static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qeec_pack(char *payload, u16 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index)
{
@@ -3920,7 +3926,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
-static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u16 local_port,
bool ptps)
{
MLXSW_REG_ZERO(qeec, payload);
@@ -3944,7 +3950,7 @@ MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
*
* Note: CPU port is supported. No support for router port.
*/
-MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qrwe, 0x00, 16, 0x00, 12);
/* reg_qrwe_dscp
* Whether to enable DSCP rewrite (default is 0, don't rewrite).
@@ -3958,7 +3964,7 @@ MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
*/
MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
-static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qrwe_pack(char *payload, u16 local_port,
bool rewrite_pcp, bool rewrite_dscp)
{
MLXSW_REG_ZERO(qrwe, payload);
@@ -3985,7 +3991,7 @@ MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdsm, 0x00, 16, 0x00, 12);
/* reg_qpdsm_prio_entry_color0_e
* Enable update of the entry for color 0 and a given port.
@@ -4038,7 +4044,7 @@ MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(qpdsm, payload);
mlxsw_reg_qpdsm_local_port_set(payload, local_port);
@@ -4071,7 +4077,7 @@ MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdp, 0x00, 16, 0x00, 12);
/* reg_qpdp_switch_prio
* Default port Switch Priority (default 0)
@@ -4079,7 +4085,7 @@ MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
-static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpdp_pack(char *payload, u16 local_port,
u8 switch_prio)
{
MLXSW_REG_ZERO(qpdp, payload);
@@ -4106,7 +4112,7 @@ MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdpm, 0x00, 16, 0x00, 12);
/* reg_qpdpm_dscp_e
* Enable update of the specific entry. When cleared, the switch_prio and color
@@ -4125,7 +4131,7 @@ MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(qpdpm, payload);
mlxsw_reg_qpdpm_local_port_set(payload, local_port);
@@ -4157,7 +4163,7 @@ MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
* No support for CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtctm, 0x00, 16, 0x00, 12);
/* reg_qtctm_mc
* Multicast Mode
@@ -4167,7 +4173,7 @@ MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
static inline void
-mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+mlxsw_reg_qtctm_pack(char *payload, u16 local_port, bool mc)
{
MLXSW_REG_ZERO(qtctm, payload);
mlxsw_reg_qtctm_local_port_set(payload, local_port);
@@ -4300,7 +4306,7 @@ MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pmlp, 0x00, 16, 0x00, 12);
/* reg_pmlp_width
* 0 - Unmap local port.
@@ -4331,7 +4337,7 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
-static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pmlp_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pmlp, payload);
mlxsw_reg_pmlp_local_port_set(payload, local_port);
@@ -4350,7 +4356,7 @@ MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pmtu, 0x00, 16, 0x00, 12);
/* reg_pmtu_max_mtu
* Maximum MTU.
@@ -4378,7 +4384,7 @@ MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
*/
MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
-static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pmtu_pack(char *payload, u16 local_port,
u16 new_mtu)
{
MLXSW_REG_ZERO(pmtu, payload);
@@ -4412,7 +4418,7 @@ MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ptys, 0x00, 16, 0x00, 12);
#define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0)
#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
@@ -4572,7 +4578,7 @@ enum mlxsw_reg_ptys_connector_type {
*/
MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4);
-static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_eth_pack(char *payload, u16 local_port,
u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4582,7 +4588,7 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
}
-static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u16 local_port,
u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4624,7 +4630,7 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
-static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port,
u16 proto_admin, u16 link_width)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4672,7 +4678,7 @@ MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
* port number, if single_base_mac = 0 then local_port is reserved
* Access: RW
*/
-MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppad, 0x00, 16, 0x00, 24);
/* reg_ppad_mac
* If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
@@ -4682,7 +4688,7 @@ MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(ppad, payload);
mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
@@ -4711,7 +4717,7 @@ MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, paos, 0x00, 16, 0x00, 12);
/* reg_paos_admin_status
* Port administrative state (the desired state of the port):
@@ -4756,7 +4762,7 @@ MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_paos_pack(char *payload, u16 local_port,
enum mlxsw_port_admin_status status)
{
MLXSW_REG_ZERO(paos, payload);
@@ -4782,7 +4788,7 @@ MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pfcc, 0x00, 16, 0x00, 12);
/* reg_pfcc_pnat
* Port number access type. Determines the way local_port is interpreted:
@@ -4899,7 +4905,7 @@ static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
}
-static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pfcc_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pfcc, payload);
mlxsw_reg_pfcc_local_port_set(payload, local_port);
@@ -4928,11 +4934,9 @@ MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
/* reg_ppcnt_local_port
* Local port number.
- * 255 indicates all ports on the device, and is only allowed
- * for Set() operation.
* Access: Index
*/
-MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppcnt, 0x00, 16, 0x00, 12);
/* reg_ppcnt_pnat
* Port number access type:
@@ -4981,6 +4985,14 @@ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
*/
MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+/* reg_ppcnt_lp_gl
+ * Local port global variable.
+ * 0: local_port 255 = all ports of the device.
+ * 1: local_port indicates local port number for all ports.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, lp_gl, 0x04, 30, 1);
+
/* reg_ppcnt_prio_tc
* Priority for counter set that support per priority, valid values: 0-7.
* Traffic class for counter set that support per traffic class,
@@ -5404,7 +5416,7 @@ MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
{
@@ -5414,6 +5426,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
mlxsw_reg_ppcnt_pnat_set(payload, 0);
mlxsw_reg_ppcnt_grp_set(payload, grp);
mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_lp_gl_set(payload, 1);
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
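[Editorial note: setting lp_gl = 1 in mlxsw_reg_ppcnt_pack() disables the legacy "local_port 255 = all ports" convention, which is required once 255 becomes a valid port number on 10-bit devices. A hypothetical caller sketch (not part of the patch) querying IEEE counters for one port under the new semantics:]

	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	/* local_port 255 now names a real port, not "all ports" */
	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);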
@@ -5430,7 +5443,7 @@ MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12);
/* reg_plib_ib_port
* InfiniBand port remapping for local_port.
@@ -5468,7 +5481,7 @@ MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pptb, 0x00, 16, 0x00, 12);
/* reg_pptb_um
* Enables the update of the untagged_buf field.
@@ -5515,7 +5528,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
-static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pptb_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pptb, payload);
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
@@ -5545,7 +5558,7 @@ MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pbmc, 0x00, 16, 0x00, 12);
/* reg_pbmc_xoff_timer_value
* When device generates a pause frame, it uses this value as the pause
@@ -5612,7 +5625,7 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
0x08, 0x04, false);
-static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pbmc_pack(char *payload, u16 local_port,
u16 xoff_timer_value, u16 xoff_refresh)
{
MLXSW_REG_ZERO(pbmc, payload);
@@ -5661,7 +5674,7 @@ MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pspa, 0x00, 16, 0x00, 0);
/* reg_pspa_sub_port
* Virtual port within the local port. Set to 0 when virtual ports are
@@ -5670,7 +5683,7 @@ MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
-static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u16 local_port)
{
MLXSW_REG_ZERO(pspa, payload);
mlxsw_reg_pspa_swid_set(payload, swid);
@@ -5772,7 +5785,7 @@ MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pplr, 0x00, 16, 0x00, 12);
/* Phy local loopback. When set the port's egress traffic is looped back
* to the receiver and the port transmitter is disabled.
@@ -5785,7 +5798,7 @@ MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8);
-static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pplr_pack(char *payload, u16 local_port,
bool phy_local)
{
MLXSW_REG_ZERO(pplr, payload);
@@ -5846,7 +5859,7 @@ MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
* the module.
* Access: RO
*/
-MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 10, 0x02, 0x00, false);
static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
u8 ports_width, u8 num_ports)
@@ -5915,7 +5928,7 @@ MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pddr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_pddr_page_select {
MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1,
@@ -5944,7 +5957,7 @@ MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16);
*/
MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16);
-static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pddr_pack(char *payload, u16 local_port,
u8 page_select)
{
MLXSW_REG_ZERO(pddr, payload);
@@ -6014,7 +6027,7 @@ MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pllp, 0x00, 16, 0x00, 12);
/* reg_pllp_label_port
* Front panel label of the port.
@@ -6034,7 +6047,7 @@ MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
*/
MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
-static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pllp_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pllp, payload);
mlxsw_reg_pllp_local_port_set(payload, local_port);
@@ -10245,7 +10258,7 @@ MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
* The local port to mirror the packets from.
* Access: Index
*/
-MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mpar, 0x00, 16, 0x00, 4);
enum mlxsw_reg_mpar_i_e {
MLXSW_REG_MPAR_TYPE_EGRESS,
@@ -10282,7 +10295,7 @@ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
*/
MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32);
-static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_mpar_pack(char *payload, u16 local_port,
enum mlxsw_reg_mpar_i_e i_e,
bool enable, u8 pa_id,
u32 probability_rate)
@@ -10386,7 +10399,7 @@ MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN);
* Local port number.
* Access: RW
*/
-MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mlcr, 0x00, 16, 0x00, 24);
#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF
@@ -10405,7 +10418,7 @@ MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16);
*/
MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16);
-static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_mlcr_pack(char *payload, u16 local_port,
bool active)
{
MLXSW_REG_ZERO(mlcr, payload);
@@ -10778,7 +10791,7 @@ MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mpsc, 0x00, 16, 0x00, 12);
/* reg_mpsc_e
* Enable sampling on port local_port
@@ -10795,7 +10808,7 @@ MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
-static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
+static inline void mlxsw_reg_mpsc_pack(char *payload, u16 local_port, bool e,
u32 rate)
{
MLXSW_REG_ZERO(mpsc, payload);
@@ -11003,7 +11016,7 @@ MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, momte, 0x00, 16, 0x00, 12);
enum mlxsw_reg_momte_type {
MLXSW_REG_MOMTE_TYPE_WRED = 0x20,
@@ -11030,7 +11043,7 @@ MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8);
*/
MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1);
-static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_momte_pack(char *payload, u16 local_port,
enum mlxsw_reg_momte_type type)
{
MLXSW_REG_ZERO(momte, payload);
@@ -11098,7 +11111,7 @@ MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN);
* Not supported for CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, mtpptr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mtpptr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_mtpptr_dir {
MLXSW_REG_MTPPTR_DIR_INGRESS,
@@ -11692,7 +11705,7 @@ MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
* Local port number (receive port). CPU port is supported.
* Access: Index
*/
-MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, tnqdr, 0x00, 16, 0x00, 12);
/* reg_tnqdr_dscp
* For encapsulation, the default DSCP.
@@ -11700,7 +11713,7 @@ MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
-static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(tnqdr, payload);
mlxsw_reg_tnqdr_local_port_set(payload, local_port);
@@ -12028,7 +12041,7 @@ MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN);
* For Egress: excludes IP Router
* Access: Index
*/
-MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbcm, 0x00, 16, 0x00, 4);
/* reg_sbcm_pg_buff
* PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
@@ -12082,7 +12095,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
*/
MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
-static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+static inline void mlxsw_reg_sbcm_pack(char *payload, u16 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff,
bool infi_max, u8 pool)
@@ -12114,7 +12127,7 @@ MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN);
* For Egress: excludes IP Router
* Access: Index
*/
-MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbpm, 0x00, 16, 0x00, 12);
/* reg_sbpm_pool
* The pool associated to quota counting on the local_port.
@@ -12168,7 +12181,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
*/
MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
-static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+static inline void mlxsw_reg_sbpm_pack(char *payload, u16 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir, bool clr,
u32 min_buff, u32 max_buff)
{
@@ -12266,6 +12279,16 @@ MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN);
*/
MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256
+
+/* reg_sbsr_port_page
+ * Determines the range of the ports specified in the 'ingress_port_mask'
+ * and 'egress_port_mask' bit masks.
+ * {ingress,egress}_port_mask[x] is (256 * port_page) + x
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4);
+
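[Editorial note: with paging, a given local port maps to page port / 256 and mask bit port % 256. A trivial standalone sketch of that arithmetic (not kernel code):]

#include <assert.h>

#define NUM_PORTS_IN_PAGE 256

int main(void)
{
	unsigned int local_port = 300;
	unsigned int page = local_port / NUM_PORTS_IN_PAGE;	/* 1 */
	unsigned int bit = local_port % NUM_PORTS_IN_PAGE;	/* 44 */

	/* mask[bit] on page 'page' addresses (256 * page) + bit */
	assert(NUM_PORTS_IN_PAGE * page + bit == local_port);
	return 0;
}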
/* reg_sbsr_ingress_port_mask
* Bit vector for all ingress network ports.
* Indicates which of the ports (for which the relevant bit is set)
@@ -12353,7 +12376,7 @@ MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN);
* Not supported for CPU port and router port
* Access: Index
*/
-MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbib, 0x00, 16, 0x00, 12);
/* reg_sbib_buff_size
* Units represented in cells
@@ -12363,7 +12386,7 @@ MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
-static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_sbib_pack(char *payload, u16 local_port,
u32 buff_size)
{
MLXSW_REG_ZERO(sbib, payload);
@@ -12374,7 +12397,6 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(sgcr),
MLXSW_REG(spad),
- MLXSW_REG(smid),
MLXSW_REG(sspr),
MLXSW_REG(sfdat),
MLXSW_REG(sfd),
@@ -12384,7 +12406,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvm),
MLXSW_REG(spaft),
MLXSW_REG(sfgc),
- MLXSW_REG(sftr),
MLXSW_REG(sfdf),
MLXSW_REG(sldr),
MLXSW_REG(slcr),
@@ -12397,6 +12418,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
+ MLXSW_REG(sftr2),
+ MLXSW_REG(smid2),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -12556,7 +12579,7 @@ MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pude, 0x00, 16, 0x00, 12);
/* reg_pude_admin_status
* Port administrative state (the desired state).
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f1ed7660b0b5..5251f33af0fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -46,8 +46,8 @@
#include "spectrum_trap.h"
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 3326
+#define MLXSW_SP1_FWREV_MINOR 2010
+#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,8 +63,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 3326
+#define MLXSW_SP2_FWREV_MINOR 2010
+#define MLXSW_SP2_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -78,8 +78,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 3326
+#define MLXSW_SP3_FWREV_MINOR 2010
+#define MLXSW_SP3_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -352,7 +352,7 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 swid)
+ u16 local_port, u8 swid)
{
char pspa_pl[MLXSW_REG_PSPA_LEN];
@@ -483,7 +483,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -535,7 +535,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
static int
-mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -560,7 +560,7 @@ err_pmlp_write:
return err;
}
-static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -1474,7 +1474,7 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *port_number,
+ u16 local_port, u8 *port_number,
u8 *split_port_subnumber,
u8 *slot_index)
{
@@ -1490,7 +1490,7 @@ static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
bool split,
struct mlxsw_sp_port_mapping *port_mapping)
{
@@ -1781,7 +1781,7 @@ err_port_swid_set:
return err;
}
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
u8 module = mlxsw_sp_port->mapping.module;
@@ -1848,12 +1848,12 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp_port);
}
-static bool mlxsw_sp_local_port_valid(u8 local_port)
+static bool mlxsw_sp_local_port_valid(u16 local_port)
{
return local_port != MLXSW_PORT_CPU_PORT;
}
-static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
if (!mlxsw_sp_local_port_valid(local_port))
return false;
@@ -1971,7 +1971,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
split_port_mapping = *port_mapping;
split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (!mlxsw_sp_local_port_valid(s_local_port))
continue;
@@ -1987,7 +1987,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
err_port_create:
for (i--; i >= 0; i--) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2004,7 +2004,7 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
/* Go over original unsplit ports in the gap and recreate them. */
for (i = 0; i < count; i++) {
- u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
port_mapping = mlxsw_sp->port_mapping[local_port];
if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
@@ -2015,14 +2015,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_port *
-mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
return mlxsw_sp->ports[local_port];
return NULL;
}
-static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
{
@@ -2065,7 +2065,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
port_mapping = mlxsw_sp_port->mapping;
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2085,7 +2085,7 @@ err_port_split_create:
return err;
}
-static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2121,7 +2121,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
}
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2148,7 +2148,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
unsigned int max_ports;
- u8 local_port;
+ u16 local_port;
max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
@@ -2174,7 +2174,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
char *mtpptr_pl, bool ingress)
{
- u8 local_port;
+ u16 local_port;
u8 num_rec;
int i;
@@ -2212,7 +2212,7 @@ static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
}
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+ u16 local_port, void *priv)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -2236,7 +2236,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
netif_receive_skb(skb);
}
-static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
skb->offload_fwd_mark = 1;
@@ -2244,7 +2244,7 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
}
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+ u16 local_port, void *priv)
{
skb->offload_l3_fwd_mark = 1;
skb->offload_fwd_mark = 1;
@@ -2252,7 +2252,7 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
}
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
+ u16 local_port)
{
mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
@@ -2755,6 +2755,140 @@ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
mutex_destroy(&mlxsw_sp->parsing.lock);
}
+struct mlxsw_sp_ipv6_addr_node {
+ struct in6_addr key;
+ struct rhash_head ht_node;
+ u32 kvdl_index;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
+ .key_len = sizeof(struct in6_addr),
+ .automatic_shrinking = true,
+};
+
+static int
+mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ p_kvdl_index);
+ if (err)
+ return err;
+
+ mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto err_node_alloc;
+ }
+
+ node->key = *addr6;
+ node->kvdl_index = *p_kvdl_index;
+ refcount_set(&node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
+ &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(node);
+err_node_alloc:
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ *p_kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipv6_addr_node *node)
+{
+ u32 kvdl_index = node->kvdl_index;
+
+ rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ kfree(node);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ kvdl_index);
+}
+
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (node) {
+ refcount_inc(&node->refcount);
+ *p_kvdl_index = node->kvdl_index;
+ goto out_unlock;
+ }
+
+ err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+ return err;
+}
+
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (WARN_ON(!node))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&node->refcount))
+ goto out_unlock;
+
+ mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+}
+
+static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
+ &mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ return err;
+
+ mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
+ return 0;
+}
+
+static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
+ rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
+}
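[Editorial note: the hash table above gives refcounted sharing of KVDL entries: the first get for an address allocates a KVDL entry, writes it via RIPS and inserts a node; later gets for the same address only bump the refcount, and the entry is freed on the last put. A hypothetical caller sketch (addr6 is an in6_addr assumed to be in scope):]

	u32 kvdl_index;
	int err;

	err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &addr6,
						&kvdl_index);
	if (err)
		return err;
	/* ... program kvdl_index into hardware ... */
	mlxsw_sp_ipv6_addr_put(mlxsw_sp, &addr6);	/* drop the reference */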
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack)
@@ -2843,6 +2977,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
+ goto err_ipv6_addr_ht_init;
+ }
+
err = mlxsw_sp_nve_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
@@ -2944,6 +3084,8 @@ err_router_init:
err_acl_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
+err_ipv6_addr_ht_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3075,6 +3217,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -3486,7 +3629,7 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
}
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 32fdd37657dd..8445fc5c9ea3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -203,6 +203,8 @@ struct mlxsw_sp {
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
+ struct rhashtable ipv6_addr_ht;
+ struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
};
struct mlxsw_sp_ptp_ops {
@@ -217,13 +219,13 @@ struct mlxsw_sp_ptp_ops {
* is responsible for freeing the passed-in SKB.
*/
void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
/* Notify a driver that a timestamped packet was transmitted. Driver
* is responsible for freeing the passed-in SKB.
*/
void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config);
@@ -261,7 +263,7 @@ enum mlxsw_sp_sample_trigger_type {
struct mlxsw_sp_sample_trigger {
enum mlxsw_sp_sample_trigger_type type;
- u8 local_port; /* Reserved when trigger type is not ingress / egress. */
+ u16 local_port; /* Reserved when trigger type is not ingress / egress. */
};
struct mlxsw_sp_sample_params {
@@ -308,7 +310,7 @@ struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
- u8 local_port;
+ u16 local_port;
u8 lagged:1,
split:1;
u16 pvid;
@@ -370,7 +372,7 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin, bool autoneg);
+ u16 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
@@ -441,7 +443,7 @@ static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 local_port;
+ u16 local_port;
local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
lag_id, port_index);
@@ -587,6 +589,11 @@ mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sample_trigger *trigger);
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
@@ -621,9 +628,9 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv);
+ u16 local_port, void *priv);
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -729,7 +736,7 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
const union mlxsw_sp_l3addr *ul_sip,
@@ -1222,7 +1229,7 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
- enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member);
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
@@ -1310,6 +1317,17 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
union mlxsw_sp_l3addr *addr);
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6);
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6);
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index);
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 67cedfa76f78..70c11bfac08f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -406,7 +406,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 local_port;
+ u16 local_port;
bool in_port;
if (out_dev) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index c72aa38424dc..50806594d977 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -83,7 +83,7 @@ static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_sp *mlxsw_sp = priv;
char ppbs_pl[MLXSW_REG_PPBS_LEN];
@@ -132,7 +132,7 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
}
static int
-mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
+mlxsw_sp_act_mirror_add(void *priv, u16 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
@@ -159,7 +159,7 @@ err_analyzed_port_get:
}
static void
-mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
+mlxsw_sp_act_mirror_del(void *priv, u16 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
@@ -192,7 +192,7 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
policer_index);
}
-static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
+static int mlxsw_sp1_act_sampler_add(void *priv, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress, int *p_span_id,
@@ -202,7 +202,7 @@ static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
return -EOPNOTSUPP;
}
-static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id,
+static void mlxsw_sp1_act_sampler_del(void *priv, u16 local_port, int span_id,
bool ingress)
{
WARN_ON_ONCE(1);
@@ -224,7 +224,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.sampler_del = mlxsw_sp1_act_sampler_del,
};
-static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port,
+static int mlxsw_sp2_act_sampler_add(void *priv, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress, int *p_span_id,
@@ -272,7 +272,7 @@ err_span_agent_get:
return err;
}
-static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id,
+static void mlxsw_sp2_act_sampler_del(void *priv, u16 local_port, int span_id,
bool ingress)
{
struct mlxsw_sp_sample_trigger trigger = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d78cf5a7220a..98f26f596e30 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -160,7 +160,7 @@ static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 pg_buff,
+ u16 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
@@ -173,7 +173,7 @@ static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u16 pool_index)
+ u16 local_port, u16 pool_index)
{
return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}
@@ -202,7 +202,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
return 0;
}
-static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 pg_buff, u32 min_buff, u32 max_buff,
bool infi_max, u16 pool_index)
{
@@ -232,7 +232,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
}
-static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, u32 min_buff, u32 max_buff)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -253,7 +253,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
}
-static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -279,7 +279,7 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
-static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -919,7 +919,7 @@ mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}
-static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
size_t cms_len)
@@ -1037,7 +1037,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
-static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_sb_pm *pms,
bool skip_ingress)
{
@@ -1416,7 +1416,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool_index);
@@ -1432,7 +1432,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u32 max_buff;
int err;
@@ -1458,7 +1458,7 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
@@ -1479,7 +1479,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
const struct mlxsw_sp_sb_cm *cm;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
@@ -1526,7 +1526,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
u8 masked_count;
- u8 local_port_1;
+ u16 local_port_1;
};
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
@@ -1536,7 +1536,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count;
- u8 local_port;
+ u16 local_port;
int rec_index = 0;
struct mlxsw_sp_sb_cm *cm;
int i;
@@ -1582,13 +1582,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
- u8 masked_count;
- u8 local_port_1;
- u8 local_port;
int i;
int err;
int err2;
@@ -1602,6 +1601,10 @@ next_batch:
local_port_1 = local_port;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1609,6 +1612,10 @@ next_batch:
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
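[Editorial note: the paging additions turn the single-shot SBSR walk into one query per 256-port page: when the port walk crosses the page boundary, the current batch is flushed at do_query and the loop re-enters next_batch on the following page. A standalone model of that control flow, with the payload packing elided (helper names and constants are illustrative, not kernel code):]

#include <stdio.h>

#define PAGE_SZ 256
#define MAX_PORTS 400

int main(void)
{
	unsigned int port = 1, page = 0;

	while (port < MAX_PORTS) {
		unsigned int last = page * PAGE_SZ + PAGE_SZ - 1;

		/* pack one SBSR payload for 'page' here */
		for (; port < MAX_PORTS; port++) {
			if (port > last) {
				page++;	/* next batch covers the next page */
				break;
			}
			/* the real code sets mask bit (port % PAGE_SZ) */
		}
		printf("query page ending at port %u\n",
		       port > last ? last : port - 1);
	}
	return 0;
}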
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
@@ -1651,10 +1658,11 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, last_local_port;
LIST_HEAD(bulk_list);
- char *sbsr_pl;
unsigned int masked_count;
- u8 local_port;
+ u8 current_page = 0;
+ char *sbsr_pl;
int i;
int err;
int err2;
@@ -1667,6 +1675,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1674,6 +1686,10 @@ next_batch:
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
@@ -1715,7 +1731,7 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool_index);
@@ -1732,7 +1748,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 84d4460f3dcd..20530712eadb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1491,7 +1491,7 @@ static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin, bool autoneg)
+ u16 local_port, u32 proto_admin, bool autoneg)
{
mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}
@@ -1969,7 +1969,7 @@ static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin,
+ u16 local_port, u32 proto_admin,
bool autoneg)
{
mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 004c42274e48..ce80931f0402 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -317,13 +317,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
}
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
- enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr_pl;
+ char *sftr2_pl;
int err;
if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
@@ -333,16 +333,16 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
if (!flood_table)
return -ESRCH;
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
+ sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
+ if (!sftr2_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr),
- sftr_pl);
- kfree(sftr_pl);
+ mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
+ ops->flood_index(fid), flood_table->table_type, 1,
+ local_port, member);
+ err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
+ sftr2_pl);
+ kfree(sftr2_pl);
return err;
}
@@ -439,7 +439,7 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
}
static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u8 local_port, u16 vid, bool valid)
+ u16 local_port, u16 vid, bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
char svfa_pl[MLXSW_REG_SVFA_LEN];
@@ -573,7 +573,7 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
@@ -601,7 +601,7 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
@@ -784,7 +784,7 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
/* We only need to transition the port to virtual mode since
@@ -808,7 +808,7 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index ad3926de88f2..01cf5a6a26bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -568,37 +568,21 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- char rips_pl[MLXSW_REG_RIPS_LEN];
struct __ip6_tnl_parm parms6;
- int err;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
- MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- &ipip_entry->dip_kvdl_index);
- if (err)
- return err;
parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
- &parms6.raddr);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
- if (err)
- goto err_rips_write;
-
- return 0;
-
-err_rips_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- ipip_entry->dip_kvdl_index);
- return err;
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+ &ipip_entry->dip_kvdl_index);
}
static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- ipip_entry->dip_kvdl_index);
+ struct __ip6_tnl_parm parms6;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 9eba8fa684ae..d2b57a045aa4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -130,15 +130,25 @@ mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
struct mlxsw_sp_nve_mc_entry *mc_entry,
const union mlxsw_sp_l3addr *addr)
{
- WARN_ON(1);
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp,
+ &addr->addr6, &kvdl_index);
+ if (err)
+ return err;
- return -EINVAL;
+ mc_entry->ipv6_entry.addr6 = addr->addr6;
+ mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index;
+ return 0;
}
static void
mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
+ mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp,
+ &mc_entry->ipv6_entry.addr6);
}
static void
@@ -787,6 +797,142 @@ static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
ops->fdb_clear_offload(nve_dev, vni);
}
+struct mlxsw_sp_nve_ipv6_ht_key {
+ u8 mac[ETH_ALEN];
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_ipv6_ht_node {
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct mlxsw_sp_nve_ipv6_ht_key key;
+ struct in6_addr addr6;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node),
+};
+
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index);
+}
+
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6)
+{
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
+}
+
+static struct mlxsw_sp_nve_ipv6_ht_node *
+mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_key key = {};
+
+ ether_addr_copy(key.mac, mac);
+ key.fid_index = fid_index;
+ return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key,
+ mlxsw_sp_nve_ipv6_ht_params);
+}
+
+static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid_index,
+ const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ int err;
+
+ ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL);
+ if (!ipv6_ht_node)
+ return -ENOMEM;
+
+ ether_addr_copy(ipv6_ht_node->key.mac, mac);
+ ipv6_ht_node->key.fid_index = fid_index;
+ ipv6_ht_node->addr6 = *addr6;
+
+ err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list);
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(ipv6_ht_node);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_del(&ipv6_ht_node->list);
+ rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ kfree(ipv6_ht_node);
+}
+
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (!ipv6_ht_node)
+ return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index,
+ new_addr6);
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ ipv6_ht_node->addr6 = *new_addr6;
+ return 0;
+}
+
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (WARN_ON(!ipv6_ht_node))
+ return;
+
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+}
+
+static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list,
+ list) {
+ if (ipv6_ht_node->key.fid_index != fid_index)
+ continue;
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+ }
+}
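[Editorial note: this {MAC, FID} -> IPv6 map lets FDB code find which remote IPv6 tunnel endpoint a MAC currently resolves to, so the old address's KVDL reference can be released when the entry is replaced or deleted; the list exists so entries can be flushed per FID. A hypothetical caller sketch (RTNL must be held, per the ASSERT_RTNL() above):]

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid_index,
						 &new_addr6);
	if (err)
		return err;
	/* ... later, when the FDB entry for 'mac' is removed ... */
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid_index);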
+
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
@@ -845,6 +991,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+ mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);
if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
mlxsw_sp_fid_vni(fid, &vni)))
@@ -981,7 +1128,13 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
err = rhashtable_init(&nve->mc_list_ht,
&mlxsw_sp_nve_mc_list_ht_params);
if (err)
- goto err_rhashtable_init;
+ goto err_mc_rhashtable_init;
+
+ err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_ipv6_rhashtable_init;
+
+ INIT_LIST_HEAD(&nve->ipv6_addr_list);
err = mlxsw_sp_nve_qos_init(mlxsw_sp);
if (err)
@@ -1000,8 +1153,10 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
err_nve_resources_query:
err_nve_ecn_init:
err_nve_qos_init:
+ rhashtable_destroy(&nve->ipv6_ht);
+err_ipv6_rhashtable_init:
rhashtable_destroy(&nve->mc_list_ht);
-err_rhashtable_init:
+err_mc_rhashtable_init:
mlxsw_sp->nve = NULL;
kfree(nve);
return err;
@@ -1010,6 +1165,8 @@ err_rhashtable_init:
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
+ WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list));
+ rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht);
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
kfree(mlxsw_sp->nve);
mlxsw_sp->nve = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 98d1fdc25eac..0d21de1d0395 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -23,6 +23,8 @@ struct mlxsw_sp_nve_config {
struct mlxsw_sp_nve {
struct mlxsw_sp_nve_config config;
struct rhashtable mc_list_ht;
+ struct rhashtable ipv6_ht;
+ struct list_head ipv6_addr_list; /* Tracks hash table nodes for flushing. */

struct mlxsw_sp *mlxsw_sp;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
unsigned int num_nve_tunnels; /* Protected by RTNL */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d018d2da5949..d309b77a0194 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -10,8 +10,48 @@
#include "spectrum.h"
#include "spectrum_nve.h"
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_LEARN)
+#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX)
+
+static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
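[Editorial note: the IPv6 policy above requires zero UDP checksum on both TX and RX and rejects anything outside the supported set. A standalone sketch of that check, with made-up flag values standing in for the VXLAN_F_* bits (not kernel code):]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define F_IPV6		(1u << 0)
#define F_ZERO_CSUM6_TX	(1u << 1)
#define F_ZERO_CSUM6_RX	(1u << 2)
#define SUPPORTED	(F_IPV6 | F_ZERO_CSUM6_TX | F_ZERO_CSUM6_RX)

static bool ipv6_flags_ok(uint32_t flags)
{
	return (flags & F_ZERO_CSUM6_TX) && (flags & F_ZERO_CSUM6_RX) &&
	       !(flags & ~SUPPORTED);
}

int main(void)
{
	assert(ipv6_flags_ok(F_IPV6 | F_ZERO_CSUM6_TX | F_ZERO_CSUM6_RX));
	assert(!ipv6_flags_ok(F_IPV6 | F_ZERO_CSUM6_TX)); /* zero-csum RX missing */
	return 0;
}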
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_params *params,
@@ -20,11 +60,6 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
- if (cfg->saddr.sa.sa_family != AF_INET) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
- return false;
- }
-
if (vxlan_addr_multicast(&cfg->remote_ip)) {
NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
return false;
@@ -55,14 +90,15 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return false;
}
- if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
- return false;
- }
-
- if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
- return false;
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack))
+ return false;
+ break;
+ case AF_INET6:
+ if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack))
+ return false;
+ break;
}
if (cfg->ttl == 0) {
@@ -90,6 +126,22 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
}
+static void
+mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg,
+ struct mlxsw_sp_nve_config *config)
+{
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
+ config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ break;
+ case AF_INET6:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV6;
+ config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr;
+ break;
+ }
+}
+
static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config)
@@ -102,8 +154,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->flowlabel = cfg->label;
config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
config->ul_tb_id = RT_TABLE_MAIN;
- config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
- config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config);
config->udp_dport = cfg->dst_port;
}
@@ -111,6 +162,7 @@ static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
const struct mlxsw_sp_nve_config *config)
{
+ struct in6_addr addr6;
u8 udp_sport;
mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
@@ -122,7 +174,18 @@ mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
get_random_bytes(&udp_sport, sizeof(udp_sport));
udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
- mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+
+ switch (config->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl,
+ be32_to_cpu(config->ul_sip.addr4));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ addr6 = config->ul_sip.addr6;
+ mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl,
+ (const char *)&addr6);
+ break;
+ }
}
static int
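One detail in the hunk above is easy to misread: the random source-port prefix is folded into the inclusive range 0x80..0xee before being written to the TNGCR register. A minimal standalone sketch of that mapping (the function name is illustrative, not a driver symbol):

#include <stdint.h>

/* Mirrors the computation in mlxsw_sp_nve_vxlan_config_prepare():
 * 0xee - 0x80 + 1 = 111 possible values, so the result of the modulo
 * plus 0x80 always lands in [0x80, 0xee].
 */
static uint8_t nve_udp_sport_prefix(uint8_t random_byte)
{
	return (uint8_t)((random_byte % (0xee - 0x80 + 1)) + 0x80);
}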
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 1a180384e7e8..0ff163fbc775 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -36,7 +36,7 @@ struct mlxsw_sp_ptp_state {
};
struct mlxsw_sp1_ptp_key {
- u8 local_port;
+ u16 local_port;
u8 message_type;
u16 sequence_id;
u8 domain_number;
@@ -406,7 +406,7 @@ mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
* This case is similar to 2) above.
*/
static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port,
+ struct sk_buff *skb, u16 local_port,
bool ingress,
struct skb_shared_hwtstamps *hwtstamps)
{
@@ -524,7 +524,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port,
+ struct sk_buff *skb, u16 local_port,
bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -564,7 +564,7 @@ immediate:
}
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
@@ -599,14 +599,14 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
}
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
+ u16 local_port)
{
skb_reset_mac_header(skb);
mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
}
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
}
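A note on the u8 to u16 conversions that recur through the rest of this series: local port numbers on newer Spectrum systems can exceed 255, and storing them in a u8 truncates silently. A standalone illustration of the failure mode (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t local_port = 260;	/* plausible on larger systems */
	uint8_t narrowed = (uint8_t)local_port;

	assert(narrowed == 4);		/* 260 % 256: the wrong port */
	return 0;
}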
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 1d43a3755285..c06cd1384bca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -31,13 +31,13 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number, u16 sequence_id,
u64 timestamp);
@@ -80,20 +80,20 @@ static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
}
static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
dev_kfree_skb_any(skb);
}
static inline void
mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number,
u16 sequence_id, u64 timestamp)
{
@@ -159,13 +159,13 @@ static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
}
static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 98df6e8fa45f..7257a04660d7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1307,6 +1307,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
addr_prefix_len = 32;
break;
case MLXSW_SP_L3_PROTO_IPV6:
+ addrp = &addr->addr6;
+ addr_len = 16;
+ addr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -7002,6 +7006,8 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
@@ -7015,6 +7021,14 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
ipip_entry);
}
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV6, &dip)) {
+ u32 tunnel_index;
+
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ }
return 0;
}
@@ -9340,7 +9354,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index f5f819aa9a65..f9671cc53002 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -37,7 +37,7 @@ struct mlxsw_sp_span {
struct mlxsw_sp_span_analyzed_port {
struct list_head list; /* Member of analyzed_ports_list */
refcount_t ref_count;
- u8 local_port;
+ u16 local_port;
bool ingress;
};
@@ -46,7 +46,7 @@ struct mlxsw_sp_span_trigger_entry {
struct mlxsw_sp_span *span;
const struct mlxsw_sp_span_trigger_ops *ops;
refcount_t ref_count;
- u8 local_port;
+ u16 local_port;
enum mlxsw_sp_span_trigger trigger;
struct mlxsw_sp_span_trigger_parms parms;
};
@@ -179,7 +179,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -199,7 +199,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -480,7 +480,7 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -584,7 +584,7 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -650,7 +650,7 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -997,7 +997,7 @@ static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_por
}
static struct mlxsw_sp_span_analyzed_port *
-mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
bool ingress)
{
struct mlxsw_sp_span_analyzed_port *analyzed_port;
@@ -1165,7 +1165,7 @@ int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err = 0;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1193,7 +1193,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 81c7e8a7fcf5..65c1724c63b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -865,17 +865,17 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
u16 mid_idx, bool add)
{
- char *smid_pl;
+ char *smid2_pl;
int err;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
+ mlxsw_sp_router_port(mlxsw_sp), add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -980,7 +980,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_device *bridge_device;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_fid *fid;
int err;
@@ -1029,7 +1029,7 @@ mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u16 vid = mlxsw_sp_port_vlan->vid;
mlxsw_sp_port_vlan->fid = NULL;
@@ -1290,38 +1290,52 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
-static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
- const char *mac, u16 fid,
- enum mlxsw_sp_l3proto proto,
- const union mlxsw_sp_l3addr *addr,
- bool adding, bool dynamic)
+static int
+mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
+ const char *mac, u16 fid, __be32 addr, bool adding)
{
- enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
char *sfd_pl;
u8 num_rec;
u32 uip;
int err;
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- uip = be32_to_cpu(addr->addr4);
- sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON(1);
- return -EOPNOTSUPP;
- }
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ uip = be32_to_cpu(addr);
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
+ mlxsw_sp_sfd_rec_policy(dynamic), mac,
+ fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ u32 kvdl_index, bool adding)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl)
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
- mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, uip,
- sfd_proto);
+ mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
+ MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1335,7 +1349,80 @@ out:
return err;
}
-static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
+ kvdl_index, true);
+ if (err)
+ goto err_sfd_write;
+
+ err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
+ if (err)
+ /* Replace can fail only when creating a new mapping, so removing
+ * the FDB entry in the error path is OK.
+ */
+ goto err_addr_replace;
+
+ return 0;
+
+err_addr_replace:
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
+ false);
+err_sfd_write:
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+ return err;
+}
+
+static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+}
+
+static int
+mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid, const struct in6_addr *addr, bool adding)
+{
+ if (adding)
+ return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
+ addr);
+
+ mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
+ return 0;
+}
+
+static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ bool adding, bool dynamic)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
+ addr->addr4, adding);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
+ &addr->addr6, adding);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
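/* Editorial note: the add path above follows the usual kernel unwind
 * idiom: each successful step gains a cleanup label, executed in
 * reverse order on a later failure. A hedged generic sketch of the
 * pattern; step_a/step_b/step_c and their undo helpers are
 * placeholders, not driver symbols.
 */
static int setup_three_steps(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_step_b;

	err = step_c();
	if (err)
		goto err_step_c;

	return 0;

err_step_c:
	undo_step_b();
err_step_b:
	undo_step_a();
	return err;
}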
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
@@ -1363,7 +1450,7 @@ out:
return err;
}
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
bool dynamic)
{
@@ -1477,30 +1564,30 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
long *ports_bitmap,
bool set_router_port)
{
- char *smid_pl;
+ char *smid2_pl;
int err, i;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
if (mlxsw_sp->ports[i])
- mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
}
- mlxsw_reg_smid_port_mask_set(smid_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl,
+ mlxsw_sp_router_port(mlxsw_sp), 1);
for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid_port_set(smid_pl, i, 1);
+ mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
- mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
+ mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
+ set_router_port);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -1508,16 +1595,16 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 mid_idx, bool add)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid_pl;
+ char *smid2_pl;
int err;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -2536,7 +2623,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port *mlxsw_sp_port;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u8 local_port;
+ u16 local_port;
u16 vid, fid;
bool do_notification = true;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 26d01adbedad..47b061b99160 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -60,7 +60,7 @@ enum {
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port,
+ u16 local_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
@@ -85,7 +85,7 @@ static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
return 0;
}
-static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct devlink_port *in_devlink_port;
@@ -109,7 +109,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index;
@@ -138,7 +138,7 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct devlink_port *in_devlink_port;
@@ -164,7 +164,7 @@ static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
return 0;
}
-static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
int err;
@@ -176,14 +176,14 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
-static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
skb->offload_fwd_mark = 1;
mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
}
-static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
skb->offload_l3_fwd_mark = 1;
@@ -191,7 +191,7 @@ static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
}
-static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
@@ -212,7 +212,7 @@ static struct mlxsw_sp_port *
mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_rx_md_info *rx_md_info)
{
- u8 local_port;
+ u16 local_port;
if (!rx_md_info->tx_port_valid)
return NULL;
@@ -257,7 +257,7 @@ static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp,
md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT;
}
-static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
@@ -293,7 +293,7 @@ out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
@@ -343,7 +343,7 @@ out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 9380e396f648..8b7a8d879083 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1305,12 +1305,6 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) {
- netif_warn(adapter, drv, adapter->netdev,
- "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n",
- config.flags);
- }
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index 83ac4266b417..2860a8c9923d 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -3,5 +3,6 @@ config LAN966X_SWITCH
depends on HAS_IOMEM
depends on OF
select PHYLINK
+ select PACKING
help
This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index e9e4dca6542d..101c1f005baf 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -83,10 +83,10 @@ static int lan966x_create_targets(struct platform_device *pdev,
begin[idx] = devm_ioremap(&pdev->dev,
iores[idx]->start,
resource_size(iores[idx]));
- if (IS_ERR(begin[idx])) {
+ if (!begin[idx]) {
dev_err(&pdev->dev, "Unable to get registers: %s\n",
iores[idx]->name);
- return PTR_ERR(begin[idx]);
+ return -ENOMEM;
}
}
@@ -512,11 +512,6 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
*buf = val;
}
- if (sz < 0) {
- err = sz;
- break;
- }
-
skb->protocol = eth_type_trans(skb, dev);
netif_rx_ni(skb);
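One generalizable point from the ioremap fix above: devm_ioremap() returns NULL on failure, while devm_ioremap_resource() returns an ERR_PTR()-encoded error, so an IS_ERR() check on the former can never fire. A short sketch of the two correct checks, assuming dev and res are already set up:

	/* devm_ioremap(): NULL on failure */
	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* devm_ioremap_resource(): ERR_PTR() on failure */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);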
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 2ddb20585d40..237555845a52 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -331,7 +331,6 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x *lan966x = port->lan966x;
bool inband_aneg = false;
bool outband;
- int err;
if (config->inband) {
if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
@@ -341,11 +340,6 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
config->autoneg)
inband_aneg = true; /* Clause-37 in-band-aneg */
- if (config->speed > 0) {
- err = phy_set_speed(port->serdes, config->speed);
- if (err)
- return err;
- }
outband = false;
} else {
outband = true;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 34b971ff8ef8..078d6a5a0768 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
if (err)
goto out;
- err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
- &hwc_wq->msg_buf);
- if (err)
- goto out;
-
hwc_wq->hwc = hwc;
hwc_wq->gdma_wq = queue;
hwc_wq->queue_depth = q_depth;
hwc_wq->hwc_cq = hwc_cq;
+ err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+ &hwc_wq->msg_buf);
+ if (err)
+ goto out;
+
*hwc_wq_ptr = hwc_wq;
return 0;
out:
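The reordering above is a common hardening pattern: fully initialize the object before the one step that can fail, so the shared error path always sees a coherent state to tear down. A hedged sketch of the idea (obj and fallible_alloc are placeholders):

	obj->hwc = hwc;			/* cheap, infallible assignments first */
	obj->queue_depth = q_depth;

	err = fallible_alloc(&obj->msg_buf);	/* placeholder helper */
	if (err)
		goto out;		/* cleanup now sees fully set-up fields */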
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 722c27694b21..41b34a509308 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -7,9 +7,11 @@ mscc_ocelot_switch_lib-y := \
ocelot_vcap.o \
ocelot_flower.o \
ocelot_ptp.o \
- ocelot_devlink.o
+ ocelot_devlink.o \
+ vsc7514_regs.o
mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
+ ocelot_fdma.o \
ocelot_vsc7514.o \
ocelot_net.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index b1856d8c944b..9b42187a026a 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1054,14 +1054,34 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
return 0;
}
-int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ u64 timestamp)
{
struct skb_shared_hwtstamps *shhwtstamps;
u64 tod_in_ns, full_ts_in_ns;
+ struct timespec64 ts;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
+EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+
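/* Editorial worked example of the wrap correction implemented above,
 * with made-up values: the hardware supplies only the low 32 bits of
 * the capture time, and if the current time-of-day's low word is
 * smaller than the captured value, the counter wrapped after the
 * capture, so one is borrowed from the high word.
 */
u64 tod_in_ns = 0x0000000500000010ULL;	/* low word 0x00000010 */
u64 timestamp = 0xfffffff0ULL;		/* captured before the wrap */
u64 full_ts_in_ns;

if ((tod_in_ns & 0xffffffff) < timestamp)
	full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | timestamp;
else
	full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | timestamp;

/* full_ts_in_ns == 0x00000004fffffff0 here: the timestamp belongs to
 * the previous high-word epoch.
 */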
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+{
u64 timestamp, src_port, len;
u32 xfh[OCELOT_TAG_LEN / 4];
struct net_device *dev;
- struct timespec64 ts;
struct sk_buff *skb;
int sz, buf_len;
u32 val, *buf;
@@ -1117,21 +1137,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
*buf = val;
}
- if (ocelot->ptp) {
- ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
-
- tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
- if ((tod_in_ns & 0xffffffff) < timestamp)
- full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
- timestamp;
- else
- full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
- timestamp;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamps->hwtstamp = full_ts_in_ns;
- }
+ if (ocelot->ptp)
+ ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
@@ -1164,6 +1171,18 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_can_inject);
+void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
+{
+ ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+ if (vlan_tag)
+ ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
+ if (rew_op)
+ ocelot_ifh_set_rew_op(ifh, rew_op);
+}
+EXPORT_SYMBOL(ocelot_ifh_port_set);
+
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
@@ -1173,11 +1192,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- ocelot_ifh_set_bypass(ifh, 1);
- ocelot_ifh_set_dest(ifh, BIT_ULL(port));
- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
- ocelot_ifh_set_rew_op(ifh, rew_op);
+ ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
@@ -1602,10 +1617,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 1eb0b5ad51e9..bf4eff6d7086 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -32,6 +32,8 @@
#define OCELOT_PTP_QUEUE_SZ 128
+#define OCELOT_JUMBO_MTU 9000
+
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
new file mode 100644
index 000000000000..dffa597bffe6
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi SoCs FDMA driver
+ *
+ * Copyright (c) 2021 Microchip
+ *
+ * The page recycling code is mostly taken from the gianfar driver.
+ */
+
+#include <linux/align.h>
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <linux/skbuff.h>
+
+#include "ocelot_fdma.h"
+#include "ocelot_qs.h"
+
+DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
+
+static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
+{
+ regmap_write(ocelot->targets[FDMA], reg, data);
+}
+
+static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
+{
+ u32 retval;
+
+ regmap_read(ocelot->targets[FDMA], reg, &retval);
+
+ return retval;
+}
+
+static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
+{
+ return base + idx * sizeof(struct ocelot_fdma_dcb);
+}
+
+static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
+{
+ return (dma - base) / sizeof(struct ocelot_fdma_dcb);
+}
+
+static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
+{
+ return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
+}
+
+static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
+{
+ return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
+}
+
+static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;
+
+ if (rx_ring->next_to_use >= rx_ring->next_to_clean)
+ return OCELOT_FDMA_RX_RING_SIZE -
+ (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
+ else
+ return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
+}
+
+static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+
+ if (tx_ring->next_to_use >= tx_ring->next_to_clean)
+ return OCELOT_FDMA_TX_RING_SIZE -
+ (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
+ else
+ return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
+}
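/* Editorial sketch: both helpers above reserve one slot so that
 * next_to_use == next_to_clean always means "empty" rather than
 * "full". Standalone version with worked values:
 */
static int ring_free(u16 use, u16 clean, u16 size)
{
	if (use >= clean)
		return size - (use - clean) - 1;
	return clean - use - 1;
}
/* ring_free(0, 0, 128)   == 127  (empty ring, one slot reserved)
 * ring_free(120, 5, 128) == 12
 * ring_free(4, 120, 128) == 115  (use wrapped past the end)
 */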
+
+static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+
+ return tx_ring->next_to_clean == tx_ring->next_to_use;
+}
+
+static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
+ int chan)
+{
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
+ /* Barrier to force memory writes to the DCB to complete before
+ * starting the channel.
+ */
+ wmb();
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
+}
+
+static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
+{
+ unsigned long timeout;
+ u32 safe;
+
+ timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
+ do {
+ safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+ if (safe & BIT(chan))
+ return 0;
+ } while (!time_after(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 offset = dma_addr & 0x3;
+
+ dcb->llp = 0;
+ dcb->datap = ALIGN_DOWN(dma_addr, 4);
+ dcb->datal = ALIGN_DOWN(size, 4);
+ dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
+}
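/* Editorial worked example for the helper above: judging by the
 * ALIGN_DOWN calls, the hardware wants 32-bit aligned data pointers,
 * so the low two address bits are carried separately in the BLOCKO
 * field. With dma_addr = 0x1006 and size = 1514:
 *
 *   offset = 0x1006 & 0x3          = 2
 *   datap  = ALIGN_DOWN(0x1006, 4) = 0x1004
 *   datal  = ALIGN_DOWN(1514, 4)   = 1512
 *   stat   = MSCC_FDMA_DCB_STAT_BLOCKO(2)
 *
 * telling the FDMA that the payload starts two bytes into the
 * aligned block.
 */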
+
+static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
+ struct ocelot_fdma_rx_buf *rxb)
+{
+ dma_addr_t mapping;
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
+ __free_page(page);
+ return false;
+ }
+
+ rxb->page = page;
+ rxb->page_offset = 0;
+ rxb->dma_addr = mapping;
+
+ return true;
+}
+
+static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ struct ocelot_fdma_dcb *dcb;
+ dma_addr_t dma_addr;
+ int ret = 0;
+ u16 idx;
+
+ rx_ring = &fdma->rx_ring;
+ idx = rx_ring->next_to_use;
+
+ while (alloc_cnt--) {
+ rxb = &rx_ring->bufs[idx];
+ /* Try to reuse the page */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
+ dev_err_ratelimited(ocelot->dev,
+ "Failed to allocate rx\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ dcb = &rx_ring->dcbs[idx];
+ dma_addr = rxb->dma_addr + rxb->page_offset;
+ ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);
+
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ /* Chain the DCB to the next one */
+ dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
+ }
+
+ rx_ring->next_to_use = idx;
+ rx_ring->next_to_alloc = idx;
+
+ return ret;
+}
+
+static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
+ struct ocelot_fdma_tx_buf *tx_buf,
+ struct ocelot_fdma_dcb *dcb,
+ struct sk_buff *skb)
+{
+ dma_addr_t mapping;
+
+ mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
+ return false;
+
+ dma_unmap_addr_set(tx_buf, dma_addr, mapping);
+
+ ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
+ tx_buf->skb = skb;
+ dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
+ dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;
+
+ return true;
+}
+
+static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
+{
+ u32 llp;
+
+ /* Check if the FDMA hits the DCB with LLP == NULL */
+ llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
+ if (unlikely(llp))
+ return false;
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
+ BIT(MSCC_FDMA_XTR_CHAN));
+
+ return true;
+}
+
+static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
+{
+ struct ocelot_fdma_dcb *dcb;
+ unsigned int idx;
+
+ idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
+ OCELOT_FDMA_RX_RING_SIZE);
+ dcb = &rx_ring->dcbs[idx];
+ dcb->llp = 0;
+}
+
+static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ const u8 chan = MSCC_FDMA_XTR_CHAN;
+ dma_addr_t new_llp, dma_base;
+ unsigned int idx;
+ u32 llp_prev;
+ int ret;
+
+ rx_ring = &fdma->rx_ring;
+ ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
+ if (ret) {
+ dev_err_ratelimited(ocelot->dev,
+ "Unable to stop RX channel\n");
+ return;
+ }
+
+ ocelot_fdma_rx_set_llp(rx_ring);
+
+ /* The FDMA stopped on the last DCB, which contained a NULL LLP.
+ * Since we processed some DCBs in RX there is now free space, so
+ * DCB_LLP must be set to point at the next DCB.
+ */
+ llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
+ dma_base = rx_ring->dcbs_dma;
+
+ /* Get the next DMA addr located after LLP == NULL DCB */
+ idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ new_llp = ocelot_fdma_idx_dma(dma_base, idx);
+
+ /* Finally reactivate the channel */
+ ocelot_fdma_activate_chan(ocelot, new_llp, chan);
+}
+
+static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
+ struct sk_buff *skb, bool first)
+{
+ int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
+ struct page *page = rxb->page;
+
+ if (likely(first)) {
+ skb_put(skb, size);
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
+ }
+
+ /* Try to reuse page */
+ if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
+ return false;
+
+ /* Change offset to the other half */
+ rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;
+
+ page_ref_inc(page);
+
+ return true;
+}
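/* Editorial illustration of the recycling scheme above: each page is
 * split into two OCELOT_FDMA_RX_SIZE halves, and while the stack owns
 * one half the driver flips page_offset to re-arm the other. Assuming
 * PAGE_SIZE == 4096, so OCELOT_FDMA_RX_SIZE == 2048:
 *
 *   page_offset = 0;
 *   page_offset ^= 2048;   now 2048, second half armed
 *   page_offset ^= 2048;   back to 0, first half armed again
 *
 * page_ref_count(page) != 1 means the stack still holds the other
 * half, and pfmemalloc pages are reserved for memory-reclaim traffic,
 * so in both cases the page is given up instead of re-armed.
 */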
+
+static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
+ struct ocelot_fdma_rx_buf *old_rxb)
+{
+ struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
+ struct ocelot_fdma_rx_buf *new_rxb;
+
+ new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
+ rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
+ OCELOT_FDMA_RX_RING_SIZE);
+
+ /* Copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* Sync for use by the device */
+ dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
+ old_rxb->page_offset,
+ OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
+ struct ocelot_fdma_rx_buf *rxb,
+ struct sk_buff *skb)
+{
+ bool first = false;
+
+ /* Allocate skb head and data */
+ if (likely(!skb)) {
+ void *buff_addr = page_address(rxb->page) +
+ rxb->page_offset;
+
+ skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ dev_err_ratelimited(ocelot->dev,
+ "build_skb failed !\n");
+ return NULL;
+ }
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
+ rxb->page_offset, OCELOT_FDMA_RX_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
+ /* Reuse the free half of the page for the next_to_alloc DCB */
+ ocelot_fdma_reuse_rx_page(ocelot, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* Clear the RX buffer content */
+ rxb->page = NULL;
+
+ return skb;
+}
+
+static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ void *xfh = skb->data;
+ u64 timestamp;
+ u64 src_port;
+
+ skb_pull(skb, OCELOT_TAG_LEN);
+
+ ocelot_xfh_get_src_port(xfh, &src_port);
+ if (unlikely(src_port >= ocelot->num_phys_ports))
+ return false;
+
+ ndev = ocelot_port_to_netdev(ocelot, src_port);
+ if (unlikely(!ndev))
+ return false;
+
+ pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ if (ocelot->ptp) {
+ ocelot_xfh_get_rew_val(xfh, &timestamp);
+ ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
+ }
+
+ if (likely(!skb_defer_rx_timestamp(skb)))
+ netif_receive_skb(skb);
+
+ return true;
+}
+
+static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ struct ocelot_fdma_dcb *dcb;
+ struct sk_buff *skb;
+ int work_done = 0;
+ int cleaned_cnt;
+ u32 stat;
+ u16 idx;
+
+ cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
+ rx_ring = &fdma->rx_ring;
+ skb = rx_ring->skb;
+
+ while (budget--) {
+ idx = rx_ring->next_to_clean;
+ dcb = &rx_ring->dcbs[idx];
+ stat = dcb->stat;
+ if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
+ break;
+
+ /* The new packet is a start of frame but an skb is already in
+ * progress; we probably lost an EOF packet, so free the skb.
+ */
+ if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+
+ rxb = &rx_ring->bufs[idx];
+ /* Fetch next to clean buffer from the rx_ring */
+ skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
+ if (unlikely(!skb))
+ break;
+
+ work_done++;
+ cleaned_cnt++;
+
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ rx_ring->next_to_clean = idx;
+
+ if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
+ stat & MSCC_FDMA_DCB_STAT_PD)) {
+ dev_err_ratelimited(ocelot->dev,
+ "DCB aborted or pruned\n");
+ dev_kfree_skb(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* We still need to process the other fragment of the packet
+ * before delivering it to the network stack
+ */
+ if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
+ continue;
+
+ if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
+ dev_kfree_skb(skb);
+
+ skb = NULL;
+ }
+
+ rx_ring->skb = skb;
+
+ if (cleaned_cnt)
+ ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);
+
+ return work_done;
+}
+
+static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
+{
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct net_device *dev;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ if (!ocelot_port)
+ continue;
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
+
+ if (unlikely(netif_queue_stopped(dev)))
+ netif_wake_queue(dev);
+ }
+}
+
+static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_tx_ring *tx_ring;
+ struct ocelot_fdma_tx_buf *buf;
+ unsigned int new_null_llp_idx;
+ struct ocelot_fdma_dcb *dcb;
+ bool end_of_list = false;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u32 dcb_llp;
+ u16 ntc;
+ int ret;
+
+ tx_ring = &fdma->tx_ring;
+
+ /* Purge the TX packets that have been sent, up to the NULL LLP or
+ * the end of the done list.
+ */
+ while (!ocelot_fdma_tx_ring_empty(fdma)) {
+ ntc = tx_ring->next_to_clean;
+ dcb = &tx_ring->dcbs[ntc];
+ if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
+ break;
+
+ buf = &tx_ring->bufs[ntc];
+ skb = buf->skb;
+ dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
+ skb->len, DMA_TO_DEVICE);
+ napi_consume_skb(skb, budget);
+ dcb_llp = dcb->llp;
+
+ /* Only update after accessing all dcb fields */
+ tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
+ OCELOT_FDMA_TX_RING_SIZE);
+
+ /* If we hit the NULL LLP, stop; we might need to reload the FDMA */
+ if (dcb_llp == 0) {
+ end_of_list = true;
+ break;
+ }
+ }
+
+ /* No need to try to wake the queues if no TX buffers were cleaned up. */
+ if (ocelot_fdma_tx_ring_free(fdma))
+ ocelot_fdma_wakeup_netdev(ocelot);
+
+ /* If there are still DCBs to be processed by the FDMA, or if the
+ * pending list is empty, there is no need to restart the FDMA.
+ */
+ if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
+ return;
+
+ ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
+ if (ret) {
+ dev_warn(ocelot->dev,
+ "Failed to wait for TX channel to stop\n");
+ return;
+ }
+
+ /* Set NULL LLP to be the last DCB used */
+ new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
+ OCELOT_FDMA_TX_RING_SIZE);
+ dcb = &tx_ring->dcbs[new_null_llp_idx];
+ dcb->llp = 0;
+
+ dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
+ ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
+}
+
+static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
+ struct ocelot *ocelot = fdma->ocelot;
+ int work_done = 0;
+ bool rx_stopped;
+
+ ocelot_fdma_tx_cleanup(ocelot, budget);
+
+ rx_stopped = ocelot_fdma_check_stop_rx(ocelot);
+
+ work_done = ocelot_fdma_rx_get(ocelot, budget);
+
+ if (rx_stopped)
+ ocelot_fdma_rx_restart(ocelot);
+
+ if (work_done < budget) {
+ napi_complete_done(&fdma->napi, work_done);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) |
+ BIT(MSCC_FDMA_XTR_CHAN));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
+{
+ u32 ident, llp, frm, err, err_code;
+ struct ocelot *ocelot = dev_id;
+
+ ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
+ frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
+ llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
+ if (frm || llp) {
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+ napi_schedule(&ocelot->fdma->napi);
+ }
+
+ err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
+ if (unlikely(err)) {
+ err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
+ dev_err_ratelimited(ocelot->dev,
+ "Error ! chans mask: %#x, code: %#x\n",
+ err, err_code);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ocelot_fdma_send_skb(struct ocelot *ocelot,
+ struct ocelot_fdma *fdma, struct sk_buff *skb)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+ struct ocelot_fdma_tx_buf *tx_buf;
+ struct ocelot_fdma_dcb *dcb;
+ dma_addr_t dma;
+ u16 next_idx;
+
+ dcb = &tx_ring->dcbs[tx_ring->next_to_use];
+ tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
+ if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
+ OCELOT_FDMA_TX_RING_SIZE);
+ skb_tx_timestamp(skb);
+
+ /* If the FDMA TX chan is empty, then enqueue the DCB directly */
+ if (ocelot_fdma_tx_ring_empty(fdma)) {
+ dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
+ tx_ring->next_to_use);
+ ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
+ } else {
+ /* Chain the DCBs */
+ dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
+ }
+
+ tx_ring->next_to_use = next_idx;
+}
+
+static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
+ int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ void *ifh;
+ int err;
+
+ if (unlikely(needed_headroom || needed_tailroom ||
+ skb_header_cloned(skb))) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+ }
+
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n",
+ dev->name, err);
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+
+ ifh = skb_push(skb, OCELOT_TAG_LEN);
+ skb_put(skb, ETH_FCS_LEN);
+ memset(ifh, 0, OCELOT_TAG_LEN);
+ ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+
+ return 0;
+}
+
+int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ int ret = NETDEV_TX_OK;
+
+ spin_lock(&fdma->tx_ring.xmit_lock);
+
+ if (ocelot_fdma_tx_ring_free(fdma) == 0) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
+ goto out;
+
+ ocelot_fdma_send_skb(ocelot, fdma, skb);
+
+out:
+ spin_unlock(&fdma->tx_ring.xmit_lock);
+
+ return ret;
+}
+
+static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ u16 idx;
+
+ rx_ring = &fdma->rx_ring;
+ idx = rx_ring->next_to_clean;
+
+ /* Free the pages held in the RX ring */
+ while (idx != rx_ring->next_to_use) {
+ rxb = &rx_ring->bufs[idx];
+ dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ }
+
+ if (fdma->rx_ring.skb)
+ dev_kfree_skb_any(fdma->rx_ring.skb);
+}
+
+static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_tx_ring *tx_ring;
+ struct ocelot_fdma_tx_buf *txb;
+ struct sk_buff *skb;
+ u16 idx;
+
+ tx_ring = &fdma->tx_ring;
+ idx = tx_ring->next_to_clean;
+
+ while (idx != tx_ring->next_to_use) {
+ txb = &tx_ring->bufs[idx];
+ skb = txb->skb;
+ dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
+ }
+}
+
+static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_dcb *dcbs;
+ unsigned int adjust;
+ dma_addr_t dcbs_dma;
+ int ret;
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
+ OCELOT_DCBS_HW_ALLOC_SIZE,
+ &fdma->dcbs_dma_base, GFP_KERNEL);
+ if (!fdma->dcbs_base)
+ return -ENOMEM;
+
+ /* DCBs must be aligned on a 32bit boundary */
+ dcbs = fdma->dcbs_base;
+ dcbs_dma = fdma->dcbs_dma_base;
+ if (!IS_ALIGNED(dcbs_dma, 4)) {
+ adjust = dcbs_dma & 0x3;
+ dcbs_dma = ALIGN(dcbs_dma, 4);
+ dcbs = (void *)dcbs + adjust;
+ }
+
+ /* TX queue */
+ fdma->tx_ring.dcbs = dcbs;
+ fdma->tx_ring.dcbs_dma = dcbs_dma;
+ spin_lock_init(&fdma->tx_ring.xmit_lock);
+
+ /* RX queue */
+ fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
+ fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
+ ret = ocelot_fdma_alloc_rx_buffs(ocelot,
+ ocelot_fdma_rx_ring_free(fdma));
+ if (ret) {
+ ocelot_fdma_free_rx_ring(ocelot);
+ return ret;
+ }
+
+ /* Set the last DCB LLP to NULL; this is normally done when
+ * restarting the RX channel, but here it is needed for the first run.
+ */
+ ocelot_fdma_rx_set_llp(&fdma->rx_ring);
+
+ return 0;
+}
+
+void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ dev->needed_headroom = OCELOT_TAG_LEN;
+ dev->needed_tailroom = ETH_FCS_LEN;
+
+ if (fdma->ndev)
+ return;
+
+ fdma->ndev = dev;
+ netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll,
+ OCELOT_FDMA_WEIGHT);
+}
+
+void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ if (fdma->ndev == dev) {
+ netif_napi_del(&fdma->napi);
+ fdma->ndev = NULL;
+ }
+}
+
+void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
+{
+ struct device *dev = ocelot->dev;
+ struct ocelot_fdma *fdma;
+ int ret;
+
+ fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
+ if (!fdma)
+ return;
+
+ ocelot->fdma = fdma;
+ ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+
+ fdma->ocelot = ocelot;
+ fdma->irq = platform_get_irq_byname(pdev, "fdma");
+ ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
+ dev_name(dev), ocelot);
+ if (ret)
+ goto err_free_fdma;
+
+ ret = ocelot_fdma_rings_alloc(ocelot);
+ if (ret)
+ goto err_free_irq;
+
+ static_branch_enable(&ocelot_fdma_enabled);
+
+ return;
+
+err_free_irq:
+ devm_free_irq(dev, fdma->irq, fdma);
+err_free_fdma:
+ devm_kfree(dev, fdma);
+
+ ocelot->fdma = NULL;
+}
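/* Editorial sketch: ocelot_fdma_enabled is a static key, enabled only
 * once FDMA initialization succeeds, so per-packet paths can test it
 * with a patched branch instead of a memory load. A hedged example of
 * how a transmit path might gate on it; ocelot_port_xmit_slowpath()
 * is a placeholder, not a driver symbol.
 */
if (static_branch_likely(&ocelot_fdma_enabled))
	return ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);

return ocelot_port_xmit_slowpath(ocelot, port, rew_op, skb);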
+
+void ocelot_fdma_start(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ /* Reconfigure for extraction and injection using DMA */
+ ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);
+
+ ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
+ BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
+
+ napi_enable(&fdma->napi);
+
+ ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
+ MSCC_FDMA_XTR_CHAN);
+}
+
+void ocelot_fdma_deinit(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
+ BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
+ BIT(MSCC_FDMA_INJ_CHAN));
+ napi_synchronize(&fdma->napi);
+ napi_disable(&fdma->napi);
+
+ ocelot_fdma_free_rx_ring(ocelot);
+ ocelot_fdma_free_tx_ring(ocelot);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.h b/drivers/net/ethernet/mscc/ocelot_fdma.h
new file mode 100644
index 000000000000..2fc8e1dd7230
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi SoCs FDMA driver
+ *
+ * Copyright (c) 2021 Microchip
+ */
+#ifndef _MSCC_OCELOT_FDMA_H_
+#define _MSCC_OCELOT_FDMA_H_
+
+#include "ocelot.h"
+
+#define MSCC_FDMA_DCB_STAT_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define MSCC_FDMA_DCB_STAT_BLOCKO_M GENMASK(31, 20)
+#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x) (((x) & GENMASK(31, 20)) >> 20)
+#define MSCC_FDMA_DCB_STAT_PD BIT(19)
+#define MSCC_FDMA_DCB_STAT_ABORT BIT(18)
+#define MSCC_FDMA_DCB_STAT_EOF BIT(17)
+#define MSCC_FDMA_DCB_STAT_SOF BIT(16)
+#define MSCC_FDMA_DCB_STAT_BLOCKL_M GENMASK(15, 0)
+#define MSCC_FDMA_DCB_STAT_BLOCKL(x) ((x) & GENMASK(15, 0))
+
+#define MSCC_FDMA_DCB_LLP(x) ((x) * 4 + 0x0)
+#define MSCC_FDMA_DCB_LLP_PREV(x) ((x) * 4 + 0xA0)
+#define MSCC_FDMA_CH_SAFE 0xcc
+#define MSCC_FDMA_CH_ACTIVATE 0xd0
+#define MSCC_FDMA_CH_DISABLE 0xd4
+#define MSCC_FDMA_CH_FORCEDIS 0xd8
+#define MSCC_FDMA_EVT_ERR 0x164
+#define MSCC_FDMA_EVT_ERR_CODE 0x168
+#define MSCC_FDMA_INTR_LLP 0x16c
+#define MSCC_FDMA_INTR_LLP_ENA 0x170
+#define MSCC_FDMA_INTR_FRM 0x174
+#define MSCC_FDMA_INTR_FRM_ENA 0x178
+#define MSCC_FDMA_INTR_ENA 0x184
+#define MSCC_FDMA_INTR_IDENT 0x188
+
+#define MSCC_FDMA_INJ_CHAN 2
+#define MSCC_FDMA_XTR_CHAN 0
+
+#define OCELOT_FDMA_WEIGHT 32
+
+#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US 10
+
+#define OCELOT_FDMA_RX_RING_SIZE 512
+#define OCELOT_FDMA_TX_RING_SIZE 128
+
+#define OCELOT_FDMA_RX_DCB_SIZE (OCELOT_FDMA_RX_RING_SIZE * \
+ sizeof(struct ocelot_fdma_dcb))
+#define OCELOT_FDMA_TX_DCB_SIZE (OCELOT_FDMA_TX_RING_SIZE * \
+ sizeof(struct ocelot_fdma_dcb))
+/* +4 allows for word alignment after allocation */
+#define OCELOT_DCBS_HW_ALLOC_SIZE (OCELOT_FDMA_RX_DCB_SIZE + \
+ OCELOT_FDMA_TX_DCB_SIZE + \
+ 4)
+
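+/* Each RX page is split into two buffers of PAGE_SIZE / 2 so a page can be
+ * recycled by flipping page_offset between 0 and PAGE_SIZE / 2.
+ */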
+#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2)
+
+#define OCELOT_FDMA_SKBFRAG_OVR (4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define OCELOT_FDMA_RXB_SIZE ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4)
+#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR)
+
+DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
+
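+/* Hardware DCB (DMA Control Block): four consecutive 32-bit words, chained
+ * through @llp, laid out exactly as the FDMA engine consumes them.
+ */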
+struct ocelot_fdma_dcb {
+ u32 llp;
+ u32 datap;
+ u32 datal;
+ u32 stat;
+} __packed;
+
+/**
+ * struct ocelot_fdma_tx_buf - TX buffer structure
+ * @skb: SKB currently used in the corresponding DCB.
+ * @dma_addr: SKB DMA mapped address.
+ */
+struct ocelot_fdma_tx_buf {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+/**
+ * struct ocelot_fdma_tx_ring - TX ring description of DCBs
+ *
+ * @dcbs: DCBs allocated for the ring
+ * @dcbs_dma: DMA base address of the DCBs
+ * @bufs: List of TX buffers associated with the DCBs
+ * @xmit_lock: lock for concurrent xmit access
+ * @next_to_clean: Next DCB to be cleaned in tx_cleanup
+ * @next_to_use: Next available DCB to send SKB
+ */
+struct ocelot_fdma_tx_ring {
+ struct ocelot_fdma_dcb *dcbs;
+ dma_addr_t dcbs_dma;
+ struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE];
+ /* Protect concurrent xmit calls */
+ spinlock_t xmit_lock;
+ u16 next_to_clean;
+ u16 next_to_use;
+};
+
+/**
+ * struct ocelot_fdma_rx_buf - RX buffer structure
+ * @page: Struct page used in this buffer
+ * @page_offset: Current page offset (either 0 or PAGE_SIZE/2)
+ * @dma_addr: DMA address of the page
+ */
+struct ocelot_fdma_rx_buf {
+ struct page *page;
+ u32 page_offset;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct ocelot_fdma_rx_ring - RX ring description of DCBs
+ *
+ * @dcbs: DCBs allocated for the ring
+ * @dcbs_dma: DMA base address of the DCBs
+ * @bufs: List of RX buffers associated with the DCBs
+ * @skb: SKB currently received by the netdev
+ * @next_to_clean: Next DCB to be cleaned during NAPI polling
+ * @next_to_use: Next available DCB to be set up with a fresh RX buffer
+ * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc)
+ */
+struct ocelot_fdma_rx_ring {
+ struct ocelot_fdma_dcb *dcbs;
+ dma_addr_t dcbs_dma;
+ struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE];
+ struct sk_buff *skb;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+};
+
+/**
+ * struct ocelot_fdma - FDMA context
+ *
+ * @irq: FDMA interrupt
+ * @ndev: Net device used to initialize NAPI
+ * @dcbs_base: Memory coherent DCBs
+ * @dcbs_dma_base: DMA base address of memory coherent DCBs
+ * @tx_ring: Injection ring
+ * @rx_ring: Extraction ring
+ * @napi: NAPI context
+ * @ocelot: Back-pointer to ocelot struct
+ */
+struct ocelot_fdma {
+ int irq;
+ struct net_device *ndev;
+ struct ocelot_fdma_dcb *dcbs_base;
+ dma_addr_t dcbs_dma_base;
+ struct ocelot_fdma_tx_ring tx_ring;
+ struct ocelot_fdma_rx_ring rx_ring;
+ struct napi_struct napi;
+ struct ocelot *ocelot;
+};
+
+void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot);
+void ocelot_fdma_start(struct ocelot *ocelot);
+void ocelot_fdma_deinit(struct ocelot *ocelot);
+int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev);
+void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev);
+void ocelot_fdma_netdev_deinit(struct ocelot *ocelot,
+ struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 0fcf359a6975..8115c3db252e 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -15,6 +15,7 @@
#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
@@ -457,7 +458,8 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
int port = priv->chip_port;
u32 rew_op = 0;
- if (!ocelot_can_inject(ocelot, 0))
+ if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
+ !ocelot_can_inject(ocelot, 0))
return NETDEV_TX_BUSY;
/* Check if timestamping is needed */
@@ -475,9 +477,13 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
rew_op = ocelot_ptp_rew_op(skb);
}
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ if (static_branch_unlikely(&ocelot_fdma_enabled)) {
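+ /* The FDMA path takes ownership of the skb: it stays DMA-mapped
+ * until TX cleanup and is released there, so it must not be
+ * consumed here.
+ */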
+ ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
+ } else {
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
- kfree_skb(skb);
+ consume_skb(skb);
+ }
return NETDEV_TX_OK;
}
@@ -764,10 +770,23 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
+static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu);
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
+ .ndo_change_mtu = ocelot_change_mtu,
.ndo_set_rx_mode = ocelot_set_rx_mode,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
@@ -1670,12 +1689,16 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
+ dev->max_mtu = OCELOT_JUMBO_MTU;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- eth_hw_addr_gen(dev, ocelot->base_mac, port);
+ err = of_get_ethdev_address(portnp, dev);
+ if (err)
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
+
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
@@ -1685,14 +1708,20 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
if (err)
goto out;
+ if (ocelot->fdma)
+ ocelot_fdma_netdev_init(ocelot, dev);
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- goto out;
+ goto out_fdma_deinit;
}
return 0;
+out_fdma_deinit:
+ if (ocelot->fdma)
+ ocelot_fdma_netdev_deinit(ocelot, dev);
out:
ocelot->ports[port] = NULL;
free_netdev(dev);
@@ -1705,9 +1734,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port)
struct ocelot_port_private *priv = container_of(ocelot_port,
struct ocelot_port_private,
port);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_fdma *fdma = ocelot->fdma;
unregister_netdev(priv->dev);
+ if (fdma)
+ ocelot_fdma_netdev_deinit(ocelot, priv->dev);
+
if (priv->phylink) {
rtnl_lock();
phylink_disconnect_phy(priv->phylink);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index cd3eb101f159..4f4a495a60ad 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -18,316 +18,24 @@
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_hsio.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "ocelot_fdma.h"
#include "ocelot.h"
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
-static const u32 ocelot_ana_regmap[] = {
- REG(ANA_ADVLEARN, 0x009000),
- REG(ANA_VLANMASK, 0x009004),
- REG(ANA_PORT_B_DOMAIN, 0x009008),
- REG(ANA_ANAGEFIL, 0x00900c),
- REG(ANA_ANEVENTS, 0x009010),
- REG(ANA_STORMLIMIT_BURST, 0x009014),
- REG(ANA_STORMLIMIT_CFG, 0x009018),
- REG(ANA_ISOLATED_PORTS, 0x009028),
- REG(ANA_COMMUNITY_PORTS, 0x00902c),
- REG(ANA_AUTOAGE, 0x009030),
- REG(ANA_MACTOPTIONS, 0x009034),
- REG(ANA_LEARNDISC, 0x009038),
- REG(ANA_AGENCTRL, 0x00903c),
- REG(ANA_MIRRORPORTS, 0x009040),
- REG(ANA_EMIRRORPORTS, 0x009044),
- REG(ANA_FLOODING, 0x009048),
- REG(ANA_FLOODING_IPMC, 0x00904c),
- REG(ANA_SFLOW_CFG, 0x009050),
- REG(ANA_PORT_MODE, 0x009080),
- REG(ANA_PGID_PGID, 0x008c00),
- REG(ANA_TABLES_ANMOVED, 0x008b30),
- REG(ANA_TABLES_MACHDATA, 0x008b34),
- REG(ANA_TABLES_MACLDATA, 0x008b38),
- REG(ANA_TABLES_MACACCESS, 0x008b3c),
- REG(ANA_TABLES_MACTINDX, 0x008b40),
- REG(ANA_TABLES_VLANACCESS, 0x008b44),
- REG(ANA_TABLES_VLANTIDX, 0x008b48),
- REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
- REG(ANA_TABLES_ISDXTIDX, 0x008b50),
- REG(ANA_TABLES_ENTRYLIM, 0x008b00),
- REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
- REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
- REG(ANA_MSTI_STATE, 0x008e00),
- REG(ANA_PORT_VLAN_CFG, 0x007000),
- REG(ANA_PORT_DROP_CFG, 0x007004),
- REG(ANA_PORT_QOS_CFG, 0x007008),
- REG(ANA_PORT_VCAP_CFG, 0x00700c),
- REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
- REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
- REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
- REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
- REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
- REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
- REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
- REG(ANA_PORT_PORT_CFG, 0x007070),
- REG(ANA_PORT_POL_CFG, 0x007074),
- REG(ANA_PORT_PTP_CFG, 0x007078),
- REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
- REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
- REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
- REG(ANA_PFC_PFC_CFG, 0x008800),
- REG(ANA_PFC_PFC_TIMER, 0x008804),
- REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
- REG(ANA_IPT_IPT, 0x008004),
- REG(ANA_PPT_PPT, 0x008ac0),
- REG(ANA_FID_MAP_FID_MAP, 0x000000),
- REG(ANA_AGGR_CFG, 0x0090b4),
- REG(ANA_CPUQ_CFG, 0x0090b8),
- REG(ANA_CPUQ_CFG2, 0x0090bc),
- REG(ANA_CPUQ_8021_CFG, 0x0090c0),
- REG(ANA_DSCP_CFG, 0x009100),
- REG(ANA_DSCP_REWR_CFG, 0x009200),
- REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
- REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
- REG(ANA_VRAP_CFG, 0x009280),
- REG(ANA_VRAP_HDR_DATA, 0x009284),
- REG(ANA_VRAP_HDR_MASK, 0x009288),
- REG(ANA_DISCARD_CFG, 0x00928c),
- REG(ANA_FID_CFG, 0x009290),
- REG(ANA_POL_PIR_CFG, 0x004000),
- REG(ANA_POL_CIR_CFG, 0x004004),
- REG(ANA_POL_MODE_CFG, 0x004008),
- REG(ANA_POL_PIR_STATE, 0x00400c),
- REG(ANA_POL_CIR_STATE, 0x004010),
- REG(ANA_POL_STATE, 0x004014),
- REG(ANA_POL_FLOWC, 0x008b80),
- REG(ANA_POL_HYST, 0x008bec),
- REG(ANA_POL_MISC_CFG, 0x008bf0),
-};
-
-static const u32 ocelot_qs_regmap[] = {
- REG(QS_XTR_GRP_CFG, 0x000000),
- REG(QS_XTR_RD, 0x000008),
- REG(QS_XTR_FRM_PRUNING, 0x000010),
- REG(QS_XTR_FLUSH, 0x000018),
- REG(QS_XTR_DATA_PRESENT, 0x00001c),
- REG(QS_XTR_CFG, 0x000020),
- REG(QS_INJ_GRP_CFG, 0x000024),
- REG(QS_INJ_WR, 0x00002c),
- REG(QS_INJ_CTRL, 0x000034),
- REG(QS_INJ_STATUS, 0x00003c),
- REG(QS_INJ_ERR, 0x000040),
- REG(QS_INH_DBG, 0x000048),
-};
-
-static const u32 ocelot_qsys_regmap[] = {
- REG(QSYS_PORT_MODE, 0x011200),
- REG(QSYS_SWITCH_PORT_MODE, 0x011234),
- REG(QSYS_STAT_CNT_CFG, 0x011264),
- REG(QSYS_EEE_CFG, 0x011268),
- REG(QSYS_EEE_THRES, 0x011294),
- REG(QSYS_IGR_NO_SHARING, 0x011298),
- REG(QSYS_EGR_NO_SHARING, 0x01129c),
- REG(QSYS_SW_STATUS, 0x0112a0),
- REG(QSYS_EXT_CPU_CFG, 0x0112d0),
- REG(QSYS_PAD_CFG, 0x0112d4),
- REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
- REG(QSYS_QMAP, 0x0112dc),
- REG(QSYS_ISDX_SGRP, 0x011400),
- REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
- REG(QSYS_TFRM_MISC, 0x011310),
- REG(QSYS_TFRM_PORT_DLY, 0x011314),
- REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
- REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
- REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
- REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
- REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
- REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
- REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
- REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
- REG(QSYS_RED_PROFILE, 0x011338),
- REG(QSYS_RES_QOS_MODE, 0x011378),
- REG(QSYS_RES_CFG, 0x012000),
- REG(QSYS_RES_STAT, 0x012004),
- REG(QSYS_EGR_DROP_MODE, 0x01137c),
- REG(QSYS_EQ_CTRL, 0x011380),
- REG(QSYS_EVENTS_CORE, 0x011384),
- REG(QSYS_CIR_CFG, 0x000000),
- REG(QSYS_EIR_CFG, 0x000004),
- REG(QSYS_SE_CFG, 0x000008),
- REG(QSYS_SE_DWRR_CFG, 0x00000c),
- REG(QSYS_SE_CONNECT, 0x00003c),
- REG(QSYS_SE_DLB_SENSE, 0x000040),
- REG(QSYS_CIR_STATE, 0x000044),
- REG(QSYS_EIR_STATE, 0x000048),
- REG(QSYS_SE_STATE, 0x00004c),
- REG(QSYS_HSCH_MISC_CFG, 0x011388),
-};
-
-static const u32 ocelot_rew_regmap[] = {
- REG(REW_PORT_VLAN_CFG, 0x000000),
- REG(REW_TAG_CFG, 0x000004),
- REG(REW_PORT_CFG, 0x000008),
- REG(REW_DSCP_CFG, 0x00000c),
- REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
- REG(REW_PTP_CFG, 0x000050),
- REG(REW_PTP_DLY1_CFG, 0x000054),
- REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
- REG(REW_DSCP_REMAP_CFG, 0x000790),
- REG(REW_STAT_CFG, 0x000890),
- REG(REW_PPT, 0x000680),
-};
-
-static const u32 ocelot_sys_regmap[] = {
- REG(SYS_COUNT_RX_OCTETS, 0x000000),
- REG(SYS_COUNT_RX_UNICAST, 0x000004),
- REG(SYS_COUNT_RX_MULTICAST, 0x000008),
- REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
- REG(SYS_COUNT_RX_SHORTS, 0x000010),
- REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
- REG(SYS_COUNT_RX_JABBERS, 0x000018),
- REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
- REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
- REG(SYS_COUNT_RX_64, 0x000024),
- REG(SYS_COUNT_RX_65_127, 0x000028),
- REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
- REG(SYS_COUNT_TX_OCTETS, 0x000100),
- REG(SYS_COUNT_TX_UNICAST, 0x000104),
- REG(SYS_COUNT_TX_MULTICAST, 0x000108),
- REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
- REG(SYS_COUNT_TX_COLLISION, 0x000110),
- REG(SYS_COUNT_TX_DROPS, 0x000114),
- REG(SYS_COUNT_TX_PAUSE, 0x000118),
- REG(SYS_COUNT_TX_64, 0x00011c),
- REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
- REG(SYS_RESET_CFG, 0x000508),
- REG(SYS_CMID, 0x00050c),
- REG(SYS_VLAN_ETYPE_CFG, 0x000510),
- REG(SYS_PORT_MODE, 0x000514),
- REG(SYS_FRONT_PORT_MODE, 0x000548),
- REG(SYS_FRM_AGING, 0x000574),
- REG(SYS_STAT_CFG, 0x000578),
- REG(SYS_SW_STATUS, 0x00057c),
- REG(SYS_MISC_CFG, 0x0005ac),
- REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
- REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
- REG(SYS_CM_ADDR, 0x000500),
- REG(SYS_CM_DATA, 0x000504),
- REG(SYS_PAUSE_CFG, 0x000608),
- REG(SYS_PAUSE_TOT_CFG, 0x000638),
- REG(SYS_ATOP, 0x00063c),
- REG(SYS_ATOP_TOT_CFG, 0x00066c),
- REG(SYS_MAC_FC_CFG, 0x000670),
- REG(SYS_MMGT, 0x00069c),
- REG(SYS_MMGT_FAST, 0x0006a0),
- REG(SYS_EVENTS_DIF, 0x0006a4),
- REG(SYS_EVENTS_CORE, 0x0006b4),
- REG(SYS_CNT, 0x000000),
- REG(SYS_PTP_STATUS, 0x0006b8),
- REG(SYS_PTP_TXSTAMP, 0x0006bc),
- REG(SYS_PTP_NXT, 0x0006c0),
- REG(SYS_PTP_CFG, 0x0006c4),
-};
-
-static const u32 ocelot_vcap_regmap[] = {
- /* VCAP_CORE_CFG */
- REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
- REG(VCAP_CORE_MV_CFG, 0x000004),
- /* VCAP_CORE_CACHE */
- REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
- REG(VCAP_CACHE_MASK_DAT, 0x000108),
- REG(VCAP_CACHE_ACTION_DAT, 0x000208),
- REG(VCAP_CACHE_CNT_DAT, 0x000308),
- REG(VCAP_CACHE_TG_DAT, 0x000388),
- /* VCAP_CONST */
- REG(VCAP_CONST_VCAP_VER, 0x000398),
- REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
- REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
- REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
- REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
- REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
- REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
- REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
- REG(VCAP_CONST_CORE_CNT, 0x0003b8),
- REG(VCAP_CONST_IF_CNT, 0x0003bc),
-};
-
-static const u32 ocelot_ptp_regmap[] = {
- REG(PTP_PIN_CFG, 0x000000),
- REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
- REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
- REG(PTP_PIN_TOD_NSEC, 0x00000c),
- REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
- REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
- REG(PTP_CFG_MISC, 0x0000a0),
- REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
- REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
-};
-
-static const u32 ocelot_dev_gmii_regmap[] = {
- REG(DEV_CLOCK_CFG, 0x0),
- REG(DEV_PORT_MISC, 0x4),
- REG(DEV_EVENTS, 0x8),
- REG(DEV_EEE_CFG, 0xc),
- REG(DEV_RX_PATH_DELAY, 0x10),
- REG(DEV_TX_PATH_DELAY, 0x14),
- REG(DEV_PTP_PREDICT_CFG, 0x18),
- REG(DEV_MAC_ENA_CFG, 0x1c),
- REG(DEV_MAC_MODE_CFG, 0x20),
- REG(DEV_MAC_MAXLEN_CFG, 0x24),
- REG(DEV_MAC_TAGS_CFG, 0x28),
- REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
- REG(DEV_MAC_IFG_CFG, 0x30),
- REG(DEV_MAC_HDX_CFG, 0x34),
- REG(DEV_MAC_DBG_CFG, 0x38),
- REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
- REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
- REG(DEV_MAC_STICKY, 0x44),
- REG(PCS1G_CFG, 0x48),
- REG(PCS1G_MODE_CFG, 0x4c),
- REG(PCS1G_SD_CFG, 0x50),
- REG(PCS1G_ANEG_CFG, 0x54),
- REG(PCS1G_ANEG_NP_CFG, 0x58),
- REG(PCS1G_LB_CFG, 0x5c),
- REG(PCS1G_DBG_CFG, 0x60),
- REG(PCS1G_CDET_CFG, 0x64),
- REG(PCS1G_ANEG_STATUS, 0x68),
- REG(PCS1G_ANEG_NP_STATUS, 0x6c),
- REG(PCS1G_LINK_STATUS, 0x70),
- REG(PCS1G_LINK_DOWN_CNT, 0x74),
- REG(PCS1G_STICKY, 0x78),
- REG(PCS1G_DEBUG_STATUS, 0x7c),
- REG(PCS1G_LPI_CFG, 0x80),
- REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
- REG(PCS1G_LPI_STATUS, 0x88),
- REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
- REG(PCS1G_TSTPAT_STATUS, 0x90),
- REG(DEV_PCS_FX100_CFG, 0x94),
- REG(DEV_PCS_FX100_STATUS, 0x98),
-};
-
static const u32 *ocelot_regmap[TARGET_MAX] = {
- [ANA] = ocelot_ana_regmap,
- [QS] = ocelot_qs_regmap,
- [QSYS] = ocelot_qsys_regmap,
- [REW] = ocelot_rew_regmap,
- [SYS] = ocelot_sys_regmap,
- [S0] = ocelot_vcap_regmap,
- [S1] = ocelot_vcap_regmap,
- [S2] = ocelot_vcap_regmap,
- [PTP] = ocelot_ptp_regmap,
- [DEV_GMII] = ocelot_dev_gmii_regmap,
+ [ANA] = vsc7514_ana_regmap,
+ [QS] = vsc7514_qs_regmap,
+ [QSYS] = vsc7514_qsys_regmap,
+ [REW] = vsc7514_rew_regmap,
+ [SYS] = vsc7514_sys_regmap,
+ [S0] = vsc7514_vcap_regmap,
+ [S1] = vsc7514_vcap_regmap,
+ [S2] = vsc7514_vcap_regmap,
+ [PTP] = vsc7514_ptp_regmap,
+ [DEV_GMII] = vsc7514_dev_gmii_regmap,
};
static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
@@ -636,211 +344,6 @@ static const struct ocelot_ops ocelot_ops = {
.netdev_to_port = ocelot_netdev_to_port,
};
-static const struct vcap_field vsc7514_vcap_es0_keys[] = {
- [VCAP_ES0_EGR_PORT] = { 0, 4},
- [VCAP_ES0_IGR_PORT] = { 4, 4},
- [VCAP_ES0_RSV] = { 8, 2},
- [VCAP_ES0_L2_MC] = { 10, 1},
- [VCAP_ES0_L2_BC] = { 11, 1},
- [VCAP_ES0_VID] = { 12, 12},
- [VCAP_ES0_DP] = { 24, 1},
- [VCAP_ES0_PCP] = { 25, 3},
-};
-
-static const struct vcap_field vsc7514_vcap_es0_actions[] = {
- [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
- [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
- [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
- [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
- [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
- [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
- [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
- [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
- [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
- [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
- [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
- [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
- [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
- [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
- [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
- [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
- [VCAP_ES0_ACT_RSV] = { 49, 24},
- [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is1_keys[] = {
- [VCAP_IS1_HK_TYPE] = { 0, 1},
- [VCAP_IS1_HK_LOOKUP] = { 1, 2},
- [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12},
- [VCAP_IS1_HK_RSV] = { 15, 9},
- [VCAP_IS1_HK_OAM_Y1731] = { 24, 1},
- [VCAP_IS1_HK_L2_MC] = { 25, 1},
- [VCAP_IS1_HK_L2_BC] = { 26, 1},
- [VCAP_IS1_HK_IP_MC] = { 27, 1},
- [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1},
- [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1},
- [VCAP_IS1_HK_TPID] = { 30, 1},
- [VCAP_IS1_HK_VID] = { 31, 12},
- [VCAP_IS1_HK_DEI] = { 43, 1},
- [VCAP_IS1_HK_PCP] = { 44, 3},
- /* Specific Fields for IS1 Half Key S1_NORMAL */
- [VCAP_IS1_HK_L2_SMAC] = { 47, 48},
- [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1},
- [VCAP_IS1_HK_ETYPE] = { 96, 16},
- [VCAP_IS1_HK_IP_SNAP] = {112, 1},
- [VCAP_IS1_HK_IP4] = {113, 1},
- /* Layer-3 Information */
- [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1},
- [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1},
- [VCAP_IS1_HK_L3_OPTIONS] = {116, 1},
- [VCAP_IS1_HK_L3_DSCP] = {117, 6},
- [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32},
- /* Layer-4 Information */
- [VCAP_IS1_HK_TCP_UDP] = {155, 1},
- [VCAP_IS1_HK_TCP] = {156, 1},
- [VCAP_IS1_HK_L4_SPORT] = {157, 16},
- [VCAP_IS1_HK_L4_RNG] = {173, 8},
- /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
- [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1},
- [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12},
- [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1},
- [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3},
- [VCAP_IS1_HK_IP4_IP4] = { 64, 1},
- [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1},
- [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1},
- [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1},
- [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6},
- [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32},
- [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32},
- [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8},
- [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1},
- [VCAP_IS1_HK_IP4_TCP] = {147, 1},
- [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8},
- [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32},
-};
-
-static const struct vcap_field vsc7514_vcap_is1_actions[] = {
- [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
- [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
- [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
- [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
- [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
- [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
- [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
- [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
- [VCAP_IS1_ACT_RSV] = { 29, 9},
- /* The fields below are incorrectly shifted by 2 in the manual */
- [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
- [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
- [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
- [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
- [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
- [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
- [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
- [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
- [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
- [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
- [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_keys[] = {
- /* Common: 46 bits */
- [VCAP_IS2_TYPE] = { 0, 4},
- [VCAP_IS2_HK_FIRST] = { 4, 1},
- [VCAP_IS2_HK_PAG] = { 5, 8},
- [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
- [VCAP_IS2_HK_RSV2] = { 25, 1},
- [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
- [VCAP_IS2_HK_L2_MC] = { 27, 1},
- [VCAP_IS2_HK_L2_BC] = { 28, 1},
- [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
- [VCAP_IS2_HK_VID] = { 30, 12},
- [VCAP_IS2_HK_DEI] = { 42, 1},
- [VCAP_IS2_HK_PCP] = { 43, 3},
- /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
- [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
- [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
- /* MAC_ETYPE (TYPE=000) */
- [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
- /* MAC_LLC (TYPE=001) */
- [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
- /* MAC_SNAP (TYPE=010) */
- [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
- /* MAC_ARP (TYPE=011) */
- [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
- [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
- [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
- [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
- [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
- [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
- [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
- [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
- [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
- [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
- [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
- /* IP4_TCP_UDP / IP4_OTHER common */
- [VCAP_IS2_HK_IP4] = { 46, 1},
- [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
- [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
- [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
- [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
- [VCAP_IS2_HK_L3_TOS] = { 51, 8},
- [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
- [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
- [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
- /* IP4_TCP_UDP (TYPE=100) */
- [VCAP_IS2_HK_TCP] = {124, 1},
- [VCAP_IS2_HK_L4_DPORT] = {125, 16},
- [VCAP_IS2_HK_L4_SPORT] = {141, 16},
- [VCAP_IS2_HK_L4_RNG] = {157, 8},
- [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
- [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
- [VCAP_IS2_HK_L4_FIN] = {167, 1},
- [VCAP_IS2_HK_L4_SYN] = {168, 1},
- [VCAP_IS2_HK_L4_RST] = {169, 1},
- [VCAP_IS2_HK_L4_PSH] = {170, 1},
- [VCAP_IS2_HK_L4_ACK] = {171, 1},
- [VCAP_IS2_HK_L4_URG] = {172, 1},
- [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
- [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
- /* IP4_OTHER (TYPE=101) */
- [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
- [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
- /* IP6_STD (TYPE=110) */
- [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
- [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
- [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
- /* OAM (TYPE=111) */
- [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
- [VCAP_IS2_HK_OAM_VER] = {149, 5},
- [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
- [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
- [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
- [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
- [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_actions[] = {
- [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
- [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
- [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
- [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
- [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
- [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
- [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
- [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
- [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
- [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
- [VCAP_IS2_ACT_REW_OP] = { 31, 9},
- [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
- [VCAP_IS2_ACT_RSV] = { 41, 2},
- [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
- [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
-};
-
static struct vcap_props vsc7514_vcap_props[] = {
[VCAP_ES0] = {
.action_type_width = 0,
@@ -1048,6 +551,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ S1, "s1" },
{ S2, "s2" },
{ PTP, "ptp", 1 },
+ { FDMA, "fdma", 1 },
};
if (!np && !pdev->dev.platform_data)
@@ -1083,6 +587,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = target;
}
+ if (ocelot->targets[FDMA])
+ ocelot_fdma_init(pdev, ocelot);
+
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
@@ -1146,6 +653,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_ocelot_devlink_unregister;
+ if (ocelot->fdma)
+ ocelot_fdma_start(ocelot);
+
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_ocelot_release_ports;
@@ -1186,6 +696,8 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ if (ocelot->fdma)
+ ocelot_fdma_deinit(ocelot);
devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
new file mode 100644
index 000000000000..c2af4eb8ca5d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright (c) 2021 Innovative Advantage
+ */
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "ocelot.h"
+
+const u32 vsc7514_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x009000),
+ REG(ANA_VLANMASK, 0x009004),
+ REG(ANA_PORT_B_DOMAIN, 0x009008),
+ REG(ANA_ANAGEFIL, 0x00900c),
+ REG(ANA_ANEVENTS, 0x009010),
+ REG(ANA_STORMLIMIT_BURST, 0x009014),
+ REG(ANA_STORMLIMIT_CFG, 0x009018),
+ REG(ANA_ISOLATED_PORTS, 0x009028),
+ REG(ANA_COMMUNITY_PORTS, 0x00902c),
+ REG(ANA_AUTOAGE, 0x009030),
+ REG(ANA_MACTOPTIONS, 0x009034),
+ REG(ANA_LEARNDISC, 0x009038),
+ REG(ANA_AGENCTRL, 0x00903c),
+ REG(ANA_MIRRORPORTS, 0x009040),
+ REG(ANA_EMIRRORPORTS, 0x009044),
+ REG(ANA_FLOODING, 0x009048),
+ REG(ANA_FLOODING_IPMC, 0x00904c),
+ REG(ANA_SFLOW_CFG, 0x009050),
+ REG(ANA_PORT_MODE, 0x009080),
+ REG(ANA_PGID_PGID, 0x008c00),
+ REG(ANA_TABLES_ANMOVED, 0x008b30),
+ REG(ANA_TABLES_MACHDATA, 0x008b34),
+ REG(ANA_TABLES_MACLDATA, 0x008b38),
+ REG(ANA_TABLES_MACACCESS, 0x008b3c),
+ REG(ANA_TABLES_MACTINDX, 0x008b40),
+ REG(ANA_TABLES_VLANACCESS, 0x008b44),
+ REG(ANA_TABLES_VLANTIDX, 0x008b48),
+ REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
+ REG(ANA_TABLES_ISDXTIDX, 0x008b50),
+ REG(ANA_TABLES_ENTRYLIM, 0x008b00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
+ REG(ANA_MSTI_STATE, 0x008e00),
+ REG(ANA_PORT_VLAN_CFG, 0x007000),
+ REG(ANA_PORT_DROP_CFG, 0x007004),
+ REG(ANA_PORT_QOS_CFG, 0x007008),
+ REG(ANA_PORT_VCAP_CFG, 0x00700c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
+ REG(ANA_PORT_PORT_CFG, 0x007070),
+ REG(ANA_PORT_POL_CFG, 0x007074),
+ REG(ANA_PORT_PTP_CFG, 0x007078),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
+ REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG(ANA_PFC_PFC_TIMER, 0x008804),
+ REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
+ REG(ANA_IPT_IPT, 0x008004),
+ REG(ANA_PPT_PPT, 0x008ac0),
+ REG(ANA_FID_MAP_FID_MAP, 0x000000),
+ REG(ANA_AGGR_CFG, 0x0090b4),
+ REG(ANA_CPUQ_CFG, 0x0090b8),
+ REG(ANA_CPUQ_CFG2, 0x0090bc),
+ REG(ANA_CPUQ_8021_CFG, 0x0090c0),
+ REG(ANA_DSCP_CFG, 0x009100),
+ REG(ANA_DSCP_REWR_CFG, 0x009200),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
+ REG(ANA_VRAP_CFG, 0x009280),
+ REG(ANA_VRAP_HDR_DATA, 0x009284),
+ REG(ANA_VRAP_HDR_MASK, 0x009288),
+ REG(ANA_DISCARD_CFG, 0x00928c),
+ REG(ANA_FID_CFG, 0x009290),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG(ANA_POL_STATE, 0x004014),
+ REG(ANA_POL_FLOWC, 0x008b80),
+ REG(ANA_POL_HYST, 0x008bec),
+ REG(ANA_POL_MISC_CFG, 0x008bf0),
+};
+EXPORT_SYMBOL(vsc7514_ana_regmap);
+
+const u32 vsc7514_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG(QS_INH_DBG, 0x000048),
+};
+EXPORT_SYMBOL(vsc7514_qs_regmap);
+
+const u32 vsc7514_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x011200),
+ REG(QSYS_SWITCH_PORT_MODE, 0x011234),
+ REG(QSYS_STAT_CNT_CFG, 0x011264),
+ REG(QSYS_EEE_CFG, 0x011268),
+ REG(QSYS_EEE_THRES, 0x011294),
+ REG(QSYS_IGR_NO_SHARING, 0x011298),
+ REG(QSYS_EGR_NO_SHARING, 0x01129c),
+ REG(QSYS_SW_STATUS, 0x0112a0),
+ REG(QSYS_EXT_CPU_CFG, 0x0112d0),
+ REG(QSYS_PAD_CFG, 0x0112d4),
+ REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
+ REG(QSYS_QMAP, 0x0112dc),
+ REG(QSYS_ISDX_SGRP, 0x011400),
+ REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
+ REG(QSYS_TFRM_MISC, 0x011310),
+ REG(QSYS_TFRM_PORT_DLY, 0x011314),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
+ REG(QSYS_RED_PROFILE, 0x011338),
+ REG(QSYS_RES_QOS_MODE, 0x011378),
+ REG(QSYS_RES_CFG, 0x012000),
+ REG(QSYS_RES_STAT, 0x012004),
+ REG(QSYS_EGR_DROP_MODE, 0x01137c),
+ REG(QSYS_EQ_CTRL, 0x011380),
+ REG(QSYS_EVENTS_CORE, 0x011384),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG(QSYS_SE_CONNECT, 0x00003c),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG(QSYS_SE_STATE, 0x00004c),
+ REG(QSYS_HSCH_MISC_CFG, 0x011388),
+};
+EXPORT_SYMBOL(vsc7514_qsys_regmap);
+
+const u32 vsc7514_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
+ REG(REW_DSCP_REMAP_CFG, 0x000790),
+ REG(REW_STAT_CFG, 0x000890),
+ REG(REW_PPT, 0x000680),
+};
+EXPORT_SYMBOL(vsc7514_rew_regmap);
+
+const u32 vsc7514_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_PAUSE, 0x00003c),
+ REG(SYS_COUNT_RX_CONTROL, 0x000040),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_511, 0x000124),
+ REG(SYS_COUNT_TX_512_1023, 0x000128),
+ REG(SYS_COUNT_TX_1024_1526, 0x00012c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_RESET_CFG, 0x000508),
+ REG(SYS_CMID, 0x00050c),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000510),
+ REG(SYS_PORT_MODE, 0x000514),
+ REG(SYS_FRONT_PORT_MODE, 0x000548),
+ REG(SYS_FRM_AGING, 0x000574),
+ REG(SYS_STAT_CFG, 0x000578),
+ REG(SYS_SW_STATUS, 0x00057c),
+ REG(SYS_MISC_CFG, 0x0005ac),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
+ REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
+ REG(SYS_CM_ADDR, 0x000500),
+ REG(SYS_CM_DATA, 0x000504),
+ REG(SYS_PAUSE_CFG, 0x000608),
+ REG(SYS_PAUSE_TOT_CFG, 0x000638),
+ REG(SYS_ATOP, 0x00063c),
+ REG(SYS_ATOP_TOT_CFG, 0x00066c),
+ REG(SYS_MAC_FC_CFG, 0x000670),
+ REG(SYS_MMGT, 0x00069c),
+ REG(SYS_MMGT_FAST, 0x0006a0),
+ REG(SYS_EVENTS_DIF, 0x0006a4),
+ REG(SYS_EVENTS_CORE, 0x0006b4),
+ REG(SYS_CNT, 0x000000),
+ REG(SYS_PTP_STATUS, 0x0006b8),
+ REG(SYS_PTP_TXSTAMP, 0x0006bc),
+ REG(SYS_PTP_NXT, 0x0006c0),
+ REG(SYS_PTP_CFG, 0x0006c4),
+};
+EXPORT_SYMBOL(vsc7514_sys_regmap);
+
+const u32 vsc7514_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
+};
+EXPORT_SYMBOL(vsc7514_vcap_regmap);
+
+const u32 vsc7514_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+EXPORT_SYMBOL(vsc7514_ptp_regmap);
+
+const u32 vsc7514_dev_gmii_regmap[] = {
+ REG(DEV_CLOCK_CFG, 0x0),
+ REG(DEV_PORT_MISC, 0x4),
+ REG(DEV_EVENTS, 0x8),
+ REG(DEV_EEE_CFG, 0xc),
+ REG(DEV_RX_PATH_DELAY, 0x10),
+ REG(DEV_TX_PATH_DELAY, 0x14),
+ REG(DEV_PTP_PREDICT_CFG, 0x18),
+ REG(DEV_MAC_ENA_CFG, 0x1c),
+ REG(DEV_MAC_MODE_CFG, 0x20),
+ REG(DEV_MAC_MAXLEN_CFG, 0x24),
+ REG(DEV_MAC_TAGS_CFG, 0x28),
+ REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
+ REG(DEV_MAC_IFG_CFG, 0x30),
+ REG(DEV_MAC_HDX_CFG, 0x34),
+ REG(DEV_MAC_DBG_CFG, 0x38),
+ REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
+ REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
+ REG(DEV_MAC_STICKY, 0x44),
+ REG(PCS1G_CFG, 0x48),
+ REG(PCS1G_MODE_CFG, 0x4c),
+ REG(PCS1G_SD_CFG, 0x50),
+ REG(PCS1G_ANEG_CFG, 0x54),
+ REG(PCS1G_ANEG_NP_CFG, 0x58),
+ REG(PCS1G_LB_CFG, 0x5c),
+ REG(PCS1G_DBG_CFG, 0x60),
+ REG(PCS1G_CDET_CFG, 0x64),
+ REG(PCS1G_ANEG_STATUS, 0x68),
+ REG(PCS1G_ANEG_NP_STATUS, 0x6c),
+ REG(PCS1G_LINK_STATUS, 0x70),
+ REG(PCS1G_LINK_DOWN_CNT, 0x74),
+ REG(PCS1G_STICKY, 0x78),
+ REG(PCS1G_DEBUG_STATUS, 0x7c),
+ REG(PCS1G_LPI_CFG, 0x80),
+ REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
+ REG(PCS1G_LPI_STATUS, 0x88),
+ REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
+ REG(PCS1G_TSTPAT_STATUS, 0x90),
+ REG(DEV_PCS_FX100_CFG, 0x94),
+ REG(DEV_PCS_FX100_STATUS, 0x98),
+};
+EXPORT_SYMBOL(vsc7514_dev_gmii_regmap);
+
+const struct vcap_field vsc7514_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4 },
+ [VCAP_ES0_IGR_PORT] = { 4, 4 },
+ [VCAP_ES0_RSV] = { 8, 2 },
+ [VCAP_ES0_L2_MC] = { 10, 1 },
+ [VCAP_ES0_L2_BC] = { 11, 1 },
+ [VCAP_ES0_VID] = { 12, 12 },
+ [VCAP_ES0_DP] = { 24, 1 },
+ [VCAP_ES0_PCP] = { 25, 3 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_es0_keys);
+
+const struct vcap_field vsc7514_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 },
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 },
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 },
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 },
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 },
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 },
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 },
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 },
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 },
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 },
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 },
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 },
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 },
+ [VCAP_ES0_ACT_RSV] = { 49, 24 },
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_es0_actions);
+
+const struct vcap_field vsc7514_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1 },
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2 },
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
+ [VCAP_IS1_HK_RSV] = { 15, 9 },
+ [VCAP_IS1_HK_OAM_Y1731] = { 24, 1 },
+ [VCAP_IS1_HK_L2_MC] = { 25, 1 },
+ [VCAP_IS1_HK_L2_BC] = { 26, 1 },
+ [VCAP_IS1_HK_IP_MC] = { 27, 1 },
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 },
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 },
+ [VCAP_IS1_HK_TPID] = { 30, 1 },
+ [VCAP_IS1_HK_VID] = { 31, 12 },
+ [VCAP_IS1_HK_DEI] = { 43, 1 },
+ [VCAP_IS1_HK_PCP] = { 44, 3 },
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 47, 48 },
+ [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 },
+ [VCAP_IS1_HK_ETYPE] = { 96, 16 },
+ [VCAP_IS1_HK_IP_SNAP] = { 112, 1 },
+ [VCAP_IS1_HK_IP4] = { 113, 1 },
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 },
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 115, 1 },
+ [VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 },
+ [VCAP_IS1_HK_L3_DSCP] = { 117, 6 },
+ [VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 },
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = { 155, 1 },
+ [VCAP_IS1_HK_TCP] = { 156, 1 },
+ [VCAP_IS1_HK_L4_SPORT] = { 157, 16 },
+ [VCAP_IS1_HK_L4_RNG] = { 173, 8 },
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 },
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 },
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 },
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 },
+ [VCAP_IS1_HK_IP4_IP4] = { 64, 1 },
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 },
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 },
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 },
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6 },
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 },
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 },
+ [VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 },
+ [VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 },
+ [VCAP_IS1_HK_IP4_TCP] = { 147, 1 },
+ [VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is1_keys);
+
+const struct vcap_field vsc7514_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3 },
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1 },
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1 },
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 },
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8 },
+ [VCAP_IS1_ACT_RSV] = { 29, 9 },
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 },
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 },
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2 },
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13 },
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 },
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3 },
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1 },
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 },
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 },
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is1_actions);
+
+const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4 },
+ [VCAP_IS2_HK_FIRST] = { 4, 1 },
+ [VCAP_IS2_HK_PAG] = { 5, 8 },
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 },
+ [VCAP_IS2_HK_RSV2] = { 25, 1 },
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1 },
+ [VCAP_IS2_HK_L2_MC] = { 27, 1 },
+ [VCAP_IS2_HK_L2_BC] = { 28, 1 },
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 },
+ [VCAP_IS2_HK_VID] = { 30, 12 },
+ [VCAP_IS2_HK_DEI] = { 42, 1 },
+ [VCAP_IS2_HK_PCP] = { 43, 3 },
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48 },
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48 },
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 },
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 },
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 },
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 },
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 },
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 },
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 },
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 },
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 },
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 },
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 },
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 102, 32 },
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 },
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 },
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1 },
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 },
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 },
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 },
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 },
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8 },
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 },
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 },
+ [VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 },
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = { 124, 1 },
+ [VCAP_IS2_HK_L4_DPORT] = { 125, 16 },
+ [VCAP_IS2_HK_L4_SPORT] = { 141, 16 },
+ [VCAP_IS2_HK_L4_RNG] = { 157, 8 },
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 },
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 },
+ [VCAP_IS2_HK_L4_FIN] = { 167, 1 },
+ [VCAP_IS2_HK_L4_SYN] = { 168, 1 },
+ [VCAP_IS2_HK_L4_RST] = { 169, 1 },
+ [VCAP_IS2_HK_L4_PSH] = { 170, 1 },
+ [VCAP_IS2_HK_L4_ACK] = { 171, 1 },
+ [VCAP_IS2_HK_L4_URG] = { 172, 1 },
+ [VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 },
+ [VCAP_IS2_HK_L4_1588_VER] = { 181, 4 },
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 },
+ [VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 },
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 },
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 },
+ [VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 },
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 },
+ [VCAP_IS2_HK_OAM_VER] = { 149, 5 },
+ [VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 },
+ [VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 },
+ [VCAP_IS2_HK_OAM_MEPID] = { 170, 16 },
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
+ [VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is2_keys);
+
+const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2 },
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 },
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1 },
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 },
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 },
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 },
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11 },
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9 },
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 },
+ [VCAP_IS2_ACT_RSV] = { 41, 2 },
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is2_actions);
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index d74a80f010c5..3f371faeb6d0 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -114,6 +114,7 @@ static int sonic_probe1(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
int err = -ENODEV;
int i;
+ unsigned char addr[ETH_ALEN];
if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
return -EBUSY;
@@ -143,9 +144,10 @@ static int sonic_probe1(struct net_device *dev)
SONIC_WRITE(SONIC_CEP,0);
for (i=0; i<3; i++) {
val = SONIC_READ(SONIC_CAP0-i);
- dev->dev_addr[i*2] = val;
- dev->dev_addr[i*2+1] = val >> 8;
+ addr[i*2] = val;
+ addr[i*2+1] = val >> 8;
}
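+	/* dev->dev_addr can no longer be written directly; install the
+	 * address assembled above via the eth_hw_addr_set() helper.
+	 */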
+ eth_hw_addr_set(dev, addr);
lp->dma_bitmode = SONIC_BITMODE32;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ca4686094701..52fef34d43f9 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -120,13 +120,14 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
{
unsigned int silicon_revision;
struct sonic_local *lp = netdev_priv(dev);
unsigned int base_addr = dev->base_addr;
int i;
int err = 0;
+ unsigned char addr[ETH_ALEN];
if (!request_mem_region(base_addr, 0x100, xtsonic_string))
return -EBUSY;
@@ -163,9 +164,10 @@ static int __init sonic_probe1(struct net_device *dev)
for (i=0; i<3; i++) {
unsigned int val = SONIC_READ(SONIC_CAP0-i);
- dev->dev_addr[i*2] = val;
- dev->dev_addr[i*2+1] = val >> 8;
+ addr[i*2] = val;
+ addr[i*2+1] = val >> 8;
}
+ eth_hw_addr_set(dev, addr);
lp->dma_bitmode = SONIC_BITMODE32;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1969009a91e7..2c2e9e56ed4e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3159,10 +3159,6 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
if (copy_from_user(&config, data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* Transmit HW Timestamp not supported */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index d7ac0307797f..34c0d2ddf9ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
return -ENOMEM;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
+ if (!cache) {
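+ /* Don't leak the area allocated just above. */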
+ nfp_cpp_area_free(area);
return -ENOMEM;
+ }
cache->id = 0;
cache->addr = 0;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 71d234291fc5..1dc40c537281 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -210,9 +210,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
/* Get ieee1588's dev information */
pdev = adapter->ptp_pdev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index f2cedbd9489c..ed1a84542ad2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2721,6 +2721,25 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
+ * qed_get_protocol_type_str(): Get a string for Protocol type.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ *
+ * Return: String.
+ */
+const char *qed_get_protocol_type_str(u32 protocol_type);
+
+/**
+ * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id).
+ *
+ * Return: String.
+ */
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id);
+
+/**
* qed_set_rdma_error_level(): Sets the RDMA assert level.
* If the severity of the error is
* above this level, the FW will assert.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 321c43408153..0ce37f2460a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -210,6 +210,82 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
XSTORM_PQ_INFO_OFFSET(pq_id))
+static const char * const s_protocol_types[] = {
+ "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
+ "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
+ "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
+ "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
+};
+
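+/* Ramrod command name tables, indexed by enum protocol_type; protocols
+ * without ramrods (TOE, PREROCE) carry a NULL row.
+ */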
+static const char *s_ramrod_cmd_ids[][28] = {
+ {
+ "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
+ "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
+ "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
+ "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
+ "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
+ "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
+ { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
+ "FCOE_RAMROD_CMD_ID_STAT_FUNC",
+ "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
+ "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
+ "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
+ "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
+ "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
+ "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
+ { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
+ "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
+ "CORE_RAMROD_TX_QUEUE_STOP",
+ "CORE_RAMROD_RX_QUEUE_FLUSH",
+ "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
+ { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
+ "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
+ "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
+ "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
+ "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
+ "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
+ "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_ADD_UDP_FILTER",
+ "ETH_RAMROD_RX_DELETE_UDP_FILTER",
+ "ETH_RAMROD_RX_CREATE_GFT_ACTION",
+ "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
+ "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
+ "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
+ "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING",
+ "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
+ "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
+ "IWARP_RAMROD_CMD_ID_MODIFY_QP",
+ "IWARP_RAMROD_CMD_ID_DESTROY_QP",
+ "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
+ { NULL }, /* TOE */
+ { NULL }, /* PREROCE */
+ { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
+ "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
+ "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
+ "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
+};
+
/******************** INTERNAL IMPLEMENTATION *********************/
/* Returns the external VOQ number */
@@ -1647,6 +1723,32 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+const char *qed_get_protocol_type_str(u32 protocol_type)
+{
+ if (protocol_type >= ARRAY_SIZE(s_protocol_types))
+ return "Invalid protocol type";
+
+ return s_protocol_types[protocol_type];
+}
+
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
+{
+ const char *ramrod_cmd_id_str;
+
+ if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
+ return "Invalid protocol type";
+
+ if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
+ return "Invalid Ramrod command ID";
+
+ ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];
+
+ if (!ramrod_cmd_id_str)
+ return "Invalid Ramrod command ID";
+
+ return ramrod_cmd_id_str;
+}
+
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
switch (storm_id) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 6958adeca86d..82e74f62b677 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2399,3 +2399,25 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ u32 i;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4);
+
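+	/* Each SB owns PIS_PER_SB consecutive 32-bit PI entries in the CAU
+	 * PI memory.
+	 */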
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 84c17e97f569..7e5127f61744 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -186,6 +186,19 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
+ * qed_int_get_sb_dbg(): Read debug information regarding a given SB.
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource
+ * @p_sb: pointer to status block for which we want to get info
+ * @p_info: pointer to struct to fill with information regarding SB
+ *
+ * Return: 0 with the status block info filled on success, error code otherwise.
+ */
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info);
+
+/**
* qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 7673b3e07736..46d4207f22a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -447,7 +447,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->wol_support = true;
dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
-
+ dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
@@ -2936,6 +2936,30 @@ out:
return status;
}
+static int
+qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
u8 dev_addr, u32 offset, u32 len)
{
@@ -2978,11 +3002,54 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
return rc;
}
+static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
+{
+ char buf[QED_MFW_REPORT_STR_SIZE];
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ va_list vl;
+
+ va_start(vl, fmt);
+ vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (p_ptt) {
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+ }
+}
+
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
return QED_AFFIN_HWFN_IDX(cdev);
}
+static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ *esl_active = false;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -3038,6 +3105,9 @@ const struct qed_common_ops qed_common_ops_pass = {
.read_nvm_cfg = &qed_nvm_flash_cfg_read,
.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
.set_grc_config = &qed_set_grc_config,
+ .mfw_report = &qed_mfw_report,
+ .get_sb_info = &qed_get_sb_info,
+ .get_esl_status = &qed_get_esl_status,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
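A note on the __printf(2, 3) annotation on qed_mfw_report() above: in the kernel it expands to the compiler's format attribute, so a mismatched format string and argument list in any caller is flagged at compile time. Roughly:

/* What __printf(a, b) boils down to (see compiler_attributes.h):
 * argument a is the format string, checking starts at argument b.
 */
#define __printf(a, b) __attribute__((__format__(printf, a, b)))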
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 64678a256f3b..da1eadabcb41 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -4158,3 +4158,25 @@ qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
return qed_mcp_send_debug_data(p_hwfn, p_ptt,
QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}
+
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
+}
+
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
+ return rc;
+ }
+
+ *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 564723800d15..369e1892450a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -15,6 +15,8 @@
#include "qed_hsi.h"
#include "qed_dev_api.h"
+#define QED_MFW_REPORT_STR_SIZE 256
+
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -1337,4 +1339,24 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 len);
+
+/**
+ * qed_mcp_is_esl_supported(): Return whether the management firmware supports ESL.
+ *
+ * @p_hwfn: hw function pointer
+ *
+ * Return: true if ESL is supported, false otherwise.
+ */
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_esl_status(): Get enhanced system lockdown status
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource pointer
+ * @active: ESL active status data pointer
+ *
+ * Return: 0 on success, with the ESL status filled in; error value otherwise.
+ */
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active);
#endif
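Taken together, the two helpers separate capability from state: qed_mcp_is_esl_supported() reports whether the firmware can do Enhanced System Lockdown at all, while qed_mcp_get_esl_status() asks whether it is engaged right now. A hedged usage sketch (assumes a PTT has already been acquired; error handling elided):

bool active = false;

if (qed_mcp_is_esl_supported(p_hwfn) &&
    !qed_mcp_get_esl_status(p_hwfn, p_ptt, &active) && active)
	DP_NOTICE(p_hwfn, "Enhanced System Lockdown is active\n");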
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
index 8a0e3c5d4bda..b70ee8200e15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -1191,6 +1191,7 @@ enum drv_msg_code_enum {
DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+ DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007),
};
#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 6f1a52e6beb2..b5e35f433a20 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -550,6 +550,8 @@
0x1 << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
0x1 << 0)
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 648176dfb871..3b54da963554 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -85,10 +85,12 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
goto err;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
- opaque_cid, cmd, protocol,
- (unsigned long)&p_ent->ramrod,
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
+ opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
+ cmd, qed_get_protocol_type_str(protocol), protocol,
+ (unsigned long long)(uintptr_t)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index e0473729b161..d01b9245f811 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -139,10 +139,13 @@ err:
if (!p_ptt)
return -EBUSY;
qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
le16_to_cpu(p_ent->elem.hdr.echo));
qed_ptt_release(p_hwfn, p_ptt);
@@ -170,13 +173,16 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
p_ent->elem.hdr.cid,
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- p_ent->elem.data_ptr.hi,
- p_ent->elem.data_ptr.lo,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
@@ -271,17 +277,27 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
{
qed_spq_async_comp_cb cb;
- if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ if (!p_hwfn->p_spq)
return -EINVAL;
+ if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+ DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
+ p_eqe->protocol_id);
+
+ return -EINVAL;
+ }
+
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
if (cb) {
return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
&p_eqe->data, p_eqe->fw_return_code);
} else {
DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
+ "Unknown Async completion for %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
p_eqe->protocol_id);
+
return -EINVAL;
}
}
@@ -830,8 +846,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->recov_in_prog) {
DP_VERBOSE(p_hwfn,
QED_MSG_SPQ,
- "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
- p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+ "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
+ p_ent->elem.hdr.cmd_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id);
/* Let the flow complete w/o any error handling */
qed_spq_recov_set_ret_code(p_ent, fw_return_code);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 100c9c52c20b..97a7ab0826ed 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -168,6 +168,8 @@ enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
QEDE_PRI_FLAG_RECOVER_ON_ERROR,
+ QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
+ QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
QEDE_PRI_FLAG_LEN,
};
@@ -175,6 +177,8 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
"Recover on error",
+ "ESL capable",
+ "ESL active",
};
enum qede_ethtool_tests {
@@ -478,6 +482,7 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
static u32 qede_get_priv_flags(struct net_device *dev)
{
struct qede_dev *edev = netdev_priv(dev);
+ bool esl_active;
u32 flags = 0;
if (edev->dev_info.common.num_hwfns > 1)
@@ -489,6 +494,14 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+ if (edev->dev_info.common.esl)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);
+
+ edev->ops->common->get_esl_status(edev->cdev, &esl_active);
+
+ if (esl_active)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);
+
return flags;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index e113fbd56e86..5ea9cb4311a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1644,6 +1644,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
+ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
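The guard added above matters because the first BD stores the packet length in a fixed-width bitfield; without it, an oversized non-LSO skb would have its length silently truncated by the mask rather than rejected. A sketch of the failure mode, assuming a 14-bit field purely for illustration:

/* Hypothetical 14-bit length field: values above 16383 wrap. */
#define PKT_LEN_MASK 0x3fff

static unsigned int stored_pkt_len(unsigned int skb_len)
{
	return skb_len & PKT_LEN_MASK;	/* e.g. 20000 -> 3616 */
}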
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 06c6a5813606..b4e5a15e308b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -509,34 +509,95 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
-static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
+ char *p_sb = (char *)fp->sb_info->sb_virt;
+ u32 sb_size, i;
+
+ sb_size = sizeof(struct status_block);
+
+ for (i = 0; i < sb_size; i += 8)
+ DP_NOTICE(edev,
+ "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
+ p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
+ p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
+}
+
+static void
+qede_txq_fp_log_metadata(struct qede_dev *edev,
+ struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_chain *p_chain = &txq->tx_pbl;
+
+ /* Dump txq/fp/sb ids and other metadata */
DP_NOTICE(edev,
- "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
- txq->index, le16_to_cpu(*txq->hw_cons_ptr),
- qed_chain_get_cons_idx(&txq->tx_pbl),
- qed_chain_get_prod_idx(&txq->tx_pbl),
- jiffies);
+ "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
+ fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
+ p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
+
+ /* Dump all the relevant prod/cons indexes */
+ DP_NOTICE(edev,
+ "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
+ le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
+ qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
+}
+
+static void
+qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_sb_info_dbg sb_dbg;
+ int rc;
+
+ /* sb info */
+ qede_fp_sb_dump(edev, fp);
+
+ memset(&sb_dbg, 0, sizeof(sb_dbg));
+ rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
+
+ DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
+ sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
+
+ /* report to mfw */
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
+ if (!rc)
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
+ txq->index, fp->sb_info->igu_sb_id,
+ sb_dbg.igu_prod, sb_dbg.igu_cons,
+ sb_dbg.pi[TX_PI(txq->cos)]);
}
static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_tx_queue *txq;
- int cos;
+ int i;
netif_carrier_off(dev);
DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
- if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
- return;
+ for_each_queue(i) {
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ int cos;
- for_each_cos_in_txq(edev, cos) {
- txq = &edev->fp_array[txqueue].txq[cos];
+ fp = &edev->fp_array[i];
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
- if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
- qed_chain_get_prod_idx(&txq->tx_pbl))
- qede_tx_log_print(edev, txq);
+ for_each_cos_in_txq(edev, cos) {
+ txq = &fp->txq[cos];
+
+ /* Dump basic metadata for all queues */
+ qede_txq_fp_log_metadata(edev, fp, txq);
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, fp, txq);
+ }
}
if (IS_VF(edev))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 8c28fabb0ff6..39176e765767 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -304,11 +304,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
"HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- DP_ERR(edev, "config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
ptp->hw_ts_ioctl_called = 1;
ptp->tx_type = config.tx_type;
ptp->rx_filter = config.rx_filter;
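This hunk, like the ravb, sfc, stmmac, cpsw, netcp and ixp4xx hunks below, drops the per-driver "config.flags must be zero" check. The validation moved into the common hwtstamp ioctl path once real flag bits (such as HWTSTAMP_FLAG_BONDED_PHC_INDEX) were defined, so a driver never sees unknown flags. A rough sketch of the core-side idea, not the exact upstream code:

/* One central check replaces the per-driver rejects;
 * KNOWN_HWTSTAMP_FLAGS is a stand-in name for the mask of
 * flag bits the core understands.
 */
static int validate_hwtstamp_flags(const struct hwtstamp_config *cfg)
{
	if (cfg->flags & ~KNOWN_HWTSTAMP_FLAGS)
		return -EINVAL;
	return 0;
}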
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 1e6d72adfe43..71523d747e93 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- err = ql_wait_for_drvr_lock(qdev);
- if (err) {
- err = ql_adapter_initialize(qdev);
- if (err) {
- netdev_err(ndev, "Unable to initialize adapter\n");
- goto err_init;
- }
- netdev_err(ndev, "Releasing driver lock\n");
- ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
- } else {
+ if (!ql_wait_for_drvr_lock(qdev)) {
netdev_err(ndev, "Could not acquire driver lock\n");
+ err = -ENODEV;
goto err_lock;
}
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netdev_err(ndev, "Unable to initialize adapter\n");
+ goto err_init;
+ }
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
set_bit(QL_ADAPTER_UP, &qdev->flags);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d51bac7ba5af..bd0607680329 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
- ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
- QLCNIC_CMD_ADD_RCV_RINGS);
+ err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to alloc mbx args %d\n", err);
+ return err;
+ }
+
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ce09bd45527e..b215cde68e10 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2221,10 +2221,6 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
if (copy_from_user(&config, req->ifr_data, sizeof(config)))
return -EFAULT;
- /* Reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_tx_ctrl = 0;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 223626290ce0..d947a628e166 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3368,8 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
goto out_release;
}
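dev_err_probe(), used above, folds the removed pattern into one call: it logs for any error except -EPROBE_DEFER (for which it records the deferral reason instead of spamming the log) and returns the error value unchanged. In probe paths that can return directly, the idiom is:

ret = sh_mdio_init(mdp, pd);
if (ret)
	return dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");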
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 797e51802ccb..f0ef515e2ade 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1765,9 +1765,6 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
int rc;
- if (init->flags)
- return -EINVAL;
-
if ((init->tx_type != HWTSTAMP_TX_OFF) &&
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 9160f9ed363a..6b5d96bced47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -317,6 +317,7 @@ enum tx_frame_status {
tx_not_ls = 0x1,
tx_err = 0x2,
tx_dma_own = 0x4,
+ tx_err_bump_tc = 0x8,
};
enum dma_irq_status {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index cbf4429fb1d2..d3b4765c1a5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -32,6 +32,8 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
return tx_not_ls;
if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ ret = tx_err;
+
if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
x->tx_jabber++;
if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
@@ -53,16 +55,16 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
- if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
x->tx_underflow++;
+ ret |= tx_err_bump_tc;
+ }
if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
x->tx_ip_header_error++;
if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
x->tx_payload_error++;
-
- ret = tx_err;
}
if (unlikely(tdes3 & TDES3_DEFERRED))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cc1075c08996..c26ac288f981 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -132,6 +132,8 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
@@ -636,10 +638,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
__func__, config.flags, config.tx_type, config.rx_filter);
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -2466,6 +2464,21 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return !!budget && work_done;
}
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
+
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
@@ -2531,6 +2544,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
} else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -2781,21 +2796,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- (tc <= 256)) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
@@ -5532,8 +5533,6 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- bool sph_en;
- u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -5545,10 +5544,13 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_cap) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ u32 chan;
- for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
- stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
return 0;
}
@@ -5744,21 +5746,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- tc <= 256) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
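stmmac_bump_dma_threshold() above consolidates logic that previously appeared verbatim in both the DMA interrupt and the MSI TX handler: unless the DMA is already in store-and-forward mode (SF_DMA_MODE), each qualifying TX error raises the threshold watermark tc by 64 while it is still <= 256. Written out as a sequence for illustration only (the real code bumps once per error event; the starting value is the driver default, typically 64):

int tc = 64;
while (tc <= 256)
	tc += 64;	/* 128, 192, 256, 320 -> no further bumps */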
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index c99dd9735087..8624a044776f 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -626,10 +626,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 33c1592d5381..751fb0bc65c5 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2654,10 +2654,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
gbe_dev->tx_ts_enabled = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f50f9a43d3ea..f47b8358669d 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -595,24 +595,24 @@ spider_net_set_multi(struct net_device *netdev)
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
+ DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
spider_net_set_promisc(card);
if (netdev->flags & IFF_ALLMULTI) {
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
- set_bit(i, bitmask);
- }
+ bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
goto write_hash;
}
+ bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
+
/* well, we know what the broadcast hash value is: it's 0xfd
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- set_bit(0xfd, bitmask);
+ __set_bit(0xfd, bitmask);
netdev_for_each_mc_addr(ha, netdev) {
hash = spider_net_get_multicast_hash(netdev, ha->addr);
- set_bit(hash, bitmask);
+ __set_bit(hash, bitmask);
}
write_hash:
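The switch from set_bit() to __set_bit() above is safe because the bitmap is a local, on-stack scratch buffer touched by a single thread; the non-atomic variant skips needless locked bus operations. The pattern in isolation:

DECLARE_BITMAP(mask, 256);

bitmap_zero(mask, 256);
__set_bit(0xfd, mask);	/* non-atomic: fine for a local bitmap */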
diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig
new file mode 100644
index 000000000000..4184a635fe01
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Vertexcom network device configuration
+#
+
+config NET_VENDOR_VERTEXCOM
+ bool "Vertexcom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Vertexcom cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_VERTEXCOM
+
+config MSE102X
+ tristate "Vertexcom MSE102x SPI"
+ depends on SPI
+ help
+ SPI driver for the Vertexcom MSE102x SPI-attached network chip.
+
+endif # NET_VENDOR_VERTEXCOM
diff --git a/drivers/net/ethernet/vertexcom/Makefile b/drivers/net/ethernet/vertexcom/Makefile
new file mode 100644
index 000000000000..f8b12e312637
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Vertexcom network device drivers.
+#
+
+obj-$(CONFIG_MSE102X) += mse102x.o
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
new file mode 100644
index 000000000000..a3c2426e5597
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 in-tech smart charging GmbH
+ *
+ * The driver is based on micrel/ks8851_spi.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/spi/spi.h>
+#include <linux/of_net.h>
+
+#define MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER)
+
+#define DRV_NAME "mse102x"
+
+#define DET_CMD 0x0001
+#define DET_SOF 0x0002
+#define DET_DFT 0x55AA
+
+#define CMD_SHIFT 12
+#define CMD_RTS (0x1 << CMD_SHIFT)
+#define CMD_CTR (0x2 << CMD_SHIFT)
+
+#define CMD_MASK GENMASK(15, CMD_SHIFT)
+#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+
+#define DET_CMD_LEN 4
+#define DET_SOF_LEN 2
+#define DET_DFT_LEN 2
+
+#define MIN_FREQ_HZ 6000000
+#define MAX_FREQ_HZ 7142857
+
+struct mse102x_stats {
+ u64 xfer_err;
+ u64 invalid_cmd;
+ u64 invalid_ctr;
+ u64 invalid_dft;
+ u64 invalid_len;
+ u64 invalid_rts;
+ u64 invalid_sof;
+ u64 tx_timeout;
+};
+
+static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "SPI transfer errors",
+ "Invalid command",
+ "Invalid CTR",
+ "Invalid DFT",
+ "Invalid frame length",
+ "Invalid RTS",
+ "Invalid SOF",
+ "TX timeout",
+};
+
+struct mse102x_net {
+ struct net_device *ndev;
+
+ u8 rxd[8];
+ u8 txd[8];
+
+ u32 msg_enable ____cacheline_aligned;
+
+ struct sk_buff_head txq;
+ struct mse102x_stats stats;
+};
+
+struct mse102x_net_spi {
+ struct mse102x_net mse102x;
+ struct mutex lock; /* Protect SPI frame transfer */
+ struct work_struct tx_work;
+ struct spi_device *spidev;
+ struct spi_message spi_msg;
+ struct spi_transfer spi_xfer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *device_root;
+#endif
+};
+
+#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mse102x_info_show(struct seq_file *s, void *what)
+{
+ struct mse102x_net_spi *mses = s->private;
+
+ seq_printf(s, "TX ring size : %u\n",
+ skb_queue_len(&mses->mse102x.txq));
+
+ seq_printf(s, "IRQ : %d\n",
+ mses->spidev->irq);
+
+ seq_printf(s, "SPI effective speed : %lu\n",
+ (unsigned long)mses->spi_xfer.effective_speed_hz);
+ seq_printf(s, "SPI mode : %x\n",
+ mses->spidev->mode);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mse102x_info);
+
+static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
+{
+ mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev),
+ NULL);
+
+ debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses,
+ &mse102x_info_fops);
+}
+
+static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
+{
+ debugfs_remove_recursive(mses->device_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
+{
+}
+
+static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
+{
+}
+
+#endif
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data.
+ */
+
+static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 txb[2];
+ int ret;
+
+ txb[0] = cpu_to_be16(DET_CMD);
+ txb[1] = cpu_to_be16(cmd);
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = DET_CMD_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ }
+}
+
+static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 *txb = (__be16 *)mse->txd;
+ __be16 *cmd = (__be16 *)mse->rxd;
+ u8 *trx = mse->rxd;
+ int ret;
+
+ txb[0] = 0;
+ txb[1] = 0;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = DET_CMD_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ } else if (*cmd != cpu_to_be16(DET_CMD)) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, *cmd);
+ mse->stats.invalid_cmd++;
+ ret = -EIO;
+ } else {
+ memcpy(rxb, trx + 2, 2);
+ }
+
+ return ret;
+}
+
+static inline void mse102x_push_header(struct sk_buff *skb)
+{
+ __be16 *header = skb_push(skb, DET_SOF_LEN);
+
+ *header = cpu_to_be16(DET_SOF);
+}
+
+static inline void mse102x_put_footer(struct sk_buff *skb)
+{
+ __be16 *footer = skb_put(skb, DET_DFT_LEN);
+
+ *footer = cpu_to_be16(DET_DFT);
+}
+
+static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
+ unsigned int pad)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ struct sk_buff *tskb;
+ int ret;
+
+ netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
+ __func__, txp, txp->len, txp->data);
+
+ if ((skb_headroom(txp) < DET_SOF_LEN) ||
+ (skb_tailroom(txp) < DET_DFT_LEN + pad)) {
+ tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad,
+ GFP_KERNEL);
+ if (!tskb)
+ return -ENOMEM;
+
+ dev_kfree_skb(txp);
+ txp = tskb;
+ }
+
+ mse102x_push_header(txp);
+
+ if (pad)
+ skb_put_zero(txp, pad);
+
+ mse102x_put_footer(txp);
+
+ xfer->tx_buf = txp->data;
+ xfer->rx_buf = NULL;
+ xfer->len = txp->len;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ }
+
+ return ret;
+}
+
+static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+ unsigned int frame_len)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 *sof = (__be16 *)buff;
+ __be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len);
+ int ret;
+
+ xfer->rx_buf = buff;
+ xfer->tx_buf = NULL;
+ xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ } else if (*sof != cpu_to_be16(DET_SOF)) {
+ netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
+ __func__, *sof);
+ mse->stats.invalid_sof++;
+ ret = -EIO;
+ } else if (*dft != cpu_to_be16(DET_DFT)) {
+ netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n",
+ __func__, *dft);
+ mse->stats.invalid_dft++;
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void mse102x_dump_packet(const char *msg, int len, const char *data)
+{
+ printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len);
+ print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ data, len, true);
+}
+
+static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+{
+ struct sk_buff *skb;
+ unsigned int rxalign;
+ unsigned int rxlen;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ u8 *rxpkt;
+ int ret;
+
+ mse102x_tx_cmd_spi(mse, CMD_CTR);
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ cmd_resp = be16_to_cpu(rx);
+
+ if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ usleep_range(50, 100);
+
+ mse102x_tx_cmd_spi(mse, CMD_CTR);
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ if (ret)
+ return;
+
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ return;
+ }
+
+ net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
+ __func__);
+ }
+
+ rxlen = cmd_resp & LEN_MASK;
+ if (!rxlen) {
+ net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ mse->stats.invalid_len++;
+ return;
+ }
+
+ rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
+ skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
+ if (!skb)
+ return;
+
+ /* 2 bytes Start of frame (before Ethernet header)
+ * 2 bytes Data frame tail (after Ethernet frame)
+ * They are copied, but ignored.
+ */
+ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ mse->ndev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (netif_msg_pktdata(mse))
+ mse102x_dump_packet(__func__, skb->len, skb->data);
+
+ skb->protocol = eth_type_trans(skb, mse->ndev);
+ netif_rx_ni(skb);
+
+ mse->ndev->stats.rx_packets++;
+ mse->ndev->stats.rx_bytes += rxlen;
+}
+
+static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
+ unsigned long work_timeout)
+{
+ unsigned int pad = 0;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ int ret;
+ bool first = true;
+
+ if (txb->len < 60)
+ pad = 60 - txb->len;
+
+ while (1) {
+ mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ cmd_resp = be16_to_cpu(rx);
+
+ if (!ret) {
+ /* ready to send the frame? */
+ if (cmd_resp == CMD_CTR)
+ break;
+
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_ctr++;
+ }
+
+ /* It's not predictable how long, or how many retries, it takes
+ * to send at least one packet, so TX timeouts are possible.
+ * That's why the netdev watchdog is not used here.
+ */
+ if (time_after(jiffies, work_timeout))
+ return -ETIMEDOUT;
+
+ if (first) {
+ /* throttle at first issue */
+ netif_stop_queue(mse->ndev);
+ /* fast retry */
+ usleep_range(50, 100);
+ first = false;
+ } else {
+ msleep(20);
+ }
+ }
+
+ ret = mse102x_tx_frame_spi(mse, txb, pad);
+ if (ret)
+ net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
+ __func__, ret);
+
+ return ret;
+}
+
+#define TX_QUEUE_MAX 10
+
+static void mse102x_tx_work(struct work_struct *work)
+{
+ /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
+ unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
+ struct mse102x_net_spi *mses;
+ struct mse102x_net *mse;
+ struct sk_buff *txb;
+ int ret = 0;
+
+ mses = container_of(work, struct mse102x_net_spi, tx_work);
+ mse = &mses->mse102x;
+
+ while ((txb = skb_dequeue(&mse->txq))) {
+ mutex_lock(&mses->lock);
+ ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
+ mutex_unlock(&mses->lock);
+ if (ret) {
+ mse->ndev->stats.tx_dropped++;
+ } else {
+ mse->ndev->stats.tx_bytes += txb->len;
+ mse->ndev->stats.tx_packets++;
+ }
+
+ dev_kfree_skb(txb);
+ }
+
+ if (ret == -ETIMEDOUT) {
+ if (netif_msg_timer(mse))
+ netdev_err(mse->ndev, "tx work timeout\n");
+
+ mse->stats.tx_timeout++;
+ }
+
+ netif_wake_queue(mse->ndev);
+}
+
+static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ netif_dbg(mse, tx_queued, ndev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ skb_queue_tail(&mse->txq, skb);
+
+ if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
+ netif_stop_queue(ndev);
+
+ schedule_work(&mses->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
+{
+ struct net_device *ndev = mse->ndev;
+ int ret = of_get_ethdev_address(np, ndev);
+
+ if (ret) {
+ eth_hw_addr_random(ndev);
+ netdev_err(ndev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ }
+}
+
+/* Assumption: this is called for every incoming packet */
+static irqreturn_t mse102x_irq(int irq, void *_mse)
+{
+ struct mse102x_net *mse = _mse;
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ mutex_lock(&mses->lock);
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int mse102x_net_open(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ int ret;
+
+ ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
+ ndev->name, mse);
+ if (ret < 0) {
+ netdev_err(ndev, "Failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ netif_dbg(mse, ifup, ndev, "opening\n");
+
+ netif_start_queue(ndev);
+
+ netif_carrier_on(ndev);
+
+ netif_dbg(mse, ifup, ndev, "network device up\n");
+
+ return 0;
+}
+
+static int mse102x_net_stop(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ netif_info(mse, ifdown, ndev, "shutting down\n");
+
+ netif_carrier_off(mse->ndev);
+
+ /* stop any outstanding work */
+ flush_work(&mses->tx_work);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&mse->txq);
+
+ free_irq(ndev->irq, mse);
+
+ return 0;
+}
+
+static const struct net_device_ops mse102x_netdev_ops = {
+ .ndo_open = mse102x_net_open,
+ .ndo_stop = mse102x_net_stop,
+ .ndo_start_xmit = mse102x_start_xmit_spi,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void mse102x_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, DRV_NAME, sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
+}
+
+static u32 mse102x_get_msglevel(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+
+ return mse->msg_enable;
+}
+
+static void mse102x_set_msglevel(struct net_device *ndev, u32 to)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+
+ mse->msg_enable = to;
+}
+
+static void mse102x_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_stats *st = &mse->stats;
+
+ memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64));
+}
+
+static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &mse102x_gstrings_stats,
+ sizeof(mse102x_gstrings_stats));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int mse102x_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(mse102x_gstrings_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct ethtool_ops mse102x_ethtool_ops = {
+ .get_drvinfo = mse102x_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mse102x_get_msglevel,
+ .set_msglevel = mse102x_set_msglevel,
+ .get_ethtool_stats = mse102x_get_ethtool_stats,
+ .get_strings = mse102x_get_strings,
+ .get_sset_count = mse102x_get_sset_count,
+};
+
+/* driver bus management functions */
+
+#ifdef CONFIG_PM_SLEEP
+
+static int mse102x_suspend(struct device *dev)
+{
+ struct mse102x_net *mse = dev_get_drvdata(dev);
+ struct net_device *ndev = mse->ndev;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ mse102x_net_stop(ndev);
+ }
+
+ return 0;
+}
+
+static int mse102x_resume(struct device *dev)
+{
+ struct mse102x_net *mse = dev_get_drvdata(dev);
+ struct net_device *ndev = mse->ndev;
+
+ if (netif_running(ndev)) {
+ mse102x_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
+
+static int mse102x_probe_spi(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mse102x_net_spi *mses;
+ struct net_device *ndev;
+ struct mse102x_net *mse;
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode |= SPI_MODE_3;
+ /* enforce minimum speed to ensure device functionality */
+ spi->master->min_speed_hz = MIN_FREQ_HZ;
+
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = MAX_FREQ_HZ;
+
+ if (spi->max_speed_hz < MIN_FREQ_HZ ||
+ spi->max_speed_hz > MAX_FREQ_HZ) {
+ dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n",
+ MIN_FREQ_HZ, MAX_FREQ_HZ);
+ return -EINVAL;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret);
+ return ret;
+ }
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi));
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4);
+ ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4);
+ ndev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ ndev->tx_queue_len = 100;
+
+ mse = netdev_priv(ndev);
+ mses = to_mse102x_spi(mse);
+
+ mses->spidev = spi;
+ mutex_init(&mses->lock);
+ INIT_WORK(&mses->tx_work, mse102x_tx_work);
+
+ /* initialise pre-made spi transfer messages */
+ spi_message_init(&mses->spi_msg);
+ spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);
+
+ ndev->irq = spi->irq;
+ mse->ndev = ndev;
+
+ /* set the default message enable */
+ mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT);
+
+ skb_queue_head_init(&mse->txq);
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ dev_set_drvdata(dev, mse);
+
+ netif_carrier_off(mse->ndev);
+ ndev->netdev_ops = &mse102x_netdev_ops;
+ ndev->ethtool_ops = &mse102x_ethtool_ops;
+
+ mse102x_init_mac(mse, dev->of_node);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "failed to register network device: %d\n", ret);
+ return ret;
+ }
+
+ mse102x_init_device_debugfs(mses);
+
+ return 0;
+}
+
+static int mse102x_remove_spi(struct spi_device *spi)
+{
+ struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ if (netif_msg_drv(mse))
+ dev_info(&spi->dev, "remove\n");
+
+ mse102x_remove_device_debugfs(mses);
+ unregister_netdev(mse->ndev);
+
+ return 0;
+}
+
+static const struct of_device_id mse102x_match_table[] = {
+ { .compatible = "vertexcom,mse1021" },
+ { .compatible = "vertexcom,mse1022" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mse102x_match_table);
+
+static struct spi_driver mse102x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mse102x_match_table,
+ .pm = &mse102x_pm_ops,
+ },
+ .probe = mse102x_probe_spi,
+ .remove = mse102x_remove_spi,
+};
+module_spi_driver(mse102x_driver);
+
+MODULE_DESCRIPTION("MSE102x Network driver");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
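Summarizing the wire format the new driver implements: control exchanges are 4-byte DET_CMD transactions, and every data frame travels as DET_SOF + payload (padded to at least 60 bytes) + DET_DFT. A worked example of the RTS command word for a minimum-size frame, using the constants defined at the top of the file:

/* CMD_RTS carries the padded frame length in the low 12 bits:
 * CMD_RTS | 60 == (0x1 << 12) | 0x3c == 0x103c
 */
u16 rts = CMD_RTS | 60;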
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index e39356364f33..23ac353b35fe 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2051,6 +2051,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.legacy_pre_march2020 = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 65fdad1107fc..df77a22d1b81 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -382,9 +382,6 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
return ret;