Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/8390/axnet_cs.c | 1
-rw-r--r-- drivers/net/ethernet/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/Makefile | 9
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 6
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 5
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 8
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 7
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 117
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 20
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 12
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 47
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 8
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 51
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 8
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 52
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 35
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 69
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/ver.h | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/aurora/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 223
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 233
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 33
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h | 66
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 89
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 121
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 375
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 37
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 1214
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 33
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 14
-rw-r--r-- drivers/net/ethernet/cadence/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 88
-rw-r--r-- drivers/net/ethernet/cadence/macb_ptp.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 47
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 35
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 35
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 40
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 9
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 10
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 26
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 57
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 33
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 708
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 186
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 163
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 376
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 54
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 29
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rq.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_wq.c | 2
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 135
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/Kconfig | 40
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 57
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 96
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 88
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 39
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 21
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 27
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 12
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.c | 21
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 18
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 30
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 22
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 56
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 408
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 21
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 115
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 120
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 676
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 48
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 30
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 75
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 27
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 17
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 26
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 177
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 5
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 36
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 6
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 18
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 168
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 33
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 35
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 347
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 16
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 69
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 13
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 16
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 266
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 42
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 365
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1
-rw-r--r-- drivers/net/ethernet/jme.c | 4
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 440
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.h | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 134
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 973
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 203
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 703
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 179
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 223
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 75
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 71
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/catas.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/crdump.c | 239
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 220
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/profile.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 947
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h | 175
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 274
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 210
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 307
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 117
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 603
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 278
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 269
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 108
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 45
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 207
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 113
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 230
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h) | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 190
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 128
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 117
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/emad.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/ib.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/item.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 973
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 473
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 260
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 244
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 428
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c | 342
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 239
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 271
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 132
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 536
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 196
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 1168
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 116
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 285
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 124
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 438
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 228
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 302
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 193
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 463
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 354
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 344
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 84
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchib.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/txheader.h | 36
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 3
-rw-r--r-- drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/microchip/Makefile | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ethtool.c | 723
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ethtool.h | 11
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 284
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.h | 234
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.c | 1160
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.h | 74
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 445
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.h | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_board.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/Kconfig | 23
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 31
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 25
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/fw.h | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 313
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.c | 57
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 58
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 79
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 92
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 213
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 37
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 5
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 34
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 51
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.c | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.h | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 29
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 20
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 169
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 58
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 22
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 6
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 12
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 4
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/Makefile | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 40
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 262
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 35
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 193
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 2
-rw-r--r-- drivers/net/ethernet/packetengines/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 127
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 16
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 67
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 16
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 56
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 138
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 423
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 195
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 7
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 1082
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 9
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 47
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 14
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/Makefile | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 12
-rw-r--r-- drivers/net/ethernet/sfc/falcon/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 7
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 2
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 32
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 17
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 69
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 228
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 371
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 280
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 411
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 31
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 71
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 133
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 60
-rw-r--r-- drivers/net/ethernet/sun/ldmvsw.c | 3
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 16
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 3
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 463
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 6
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 31
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 13
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 1
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 112
449 files changed, 27830 insertions, 9679 deletions
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d422a124cd7c..0b6bbf63f7ca 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -610,6 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
+ /* Fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
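
Note: the "/* Fall through */" comment added above marks an intentional switch fall-through for GCC's -Wimplicit-fallthrough warning. A minimal standalone sketch of the pattern, with illustrative names not taken from this driver:

/* Sketch only: a comment matching the patterns GCC recognizes
 * ("Fall through", "fallthrough", ...) suppresses the warning
 * for a deliberate fall-through between cases. */
static int classify(int cmd, int *out)
{
        switch (cmd) {
        case 1:
                *out += 1;
                /* Fall through */      /* case 1 deliberately implies case 2 */
        case 2:
                *out += 2;
                return 0;
        default:
                return -1;
        }
}
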
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index af766fd61151..6fde68aa13a4 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -81,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,6 +127,7 @@ config FEALNX
cards. <http://www.myson.com.tw/>
source "drivers/net/ethernet/natsemi/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8fbfe9ce2fa5..b45d5f626b59 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
-obj-$(CONFIG_NET_CADENCE) += cadence/
+obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
-obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
@@ -60,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
@@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
-obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
@@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
-obj-$(CONFIG_SFC) += sfc/
-obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
+obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 3872ab96b80a..097467f44b0d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev,
int mii_status;
for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
- mdelay(100);
+ msleep(100);
boguscnt = 1000;
while (--boguscnt > 0)
if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
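
Note: the mdelay()-to-msleep() change above follows the usual kernel rule: mdelay() busy-waits and is meant for atomic context, while msleep() yields the CPU and is only valid where sleeping is allowed (PCI probe paths such as starfire_init_one() qualify). A hedged sketch of the rule, using an illustrative "can_sleep" flag that is not a real kernel API:

#include <linux/delay.h>
#include <linux/types.h>

/* Sketch only: choose a delay primitive by calling context. */
static void wait_for_phy_reset(bool can_sleep)
{
        if (can_sleep)
                msleep(100);    /* sleeps; the scheduler runs other work */
        else
                mdelay(100);    /* busy-waits; safe with interrupts off */
}
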
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8f71b79b4949..4f11f98347ed 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -551,6 +551,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
ap->name);
break;
}
+ /* Fall through */
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
@@ -1933,7 +1934,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
- struct rx_desc *rxdesc, *retdesc;
+ struct rx_desc *retdesc;
u32 skbidx;
int bd_flags, desc_type, mapsize;
u16 csum;
@@ -1959,19 +1960,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE;
- rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE;
- rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
- rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f2af87d70594..c673ac2df65b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
u16 qid;
/* we suspect that this is good for in-kernel network services that
@@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb);
+ qid = fallback(dev, skb, NULL);
return qid;
}
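
Note: this hunk tracks a tree-wide ndo_select_queue() rework in which the opaque "void *accel_priv" argument became an explicit subordinate-device pointer that is also threaded through the fallback. A minimal sketch of a hook using the new shape, assuming the prototypes implied by this diff:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: reuse a recorded rx queue when present, otherwise defer
 * to the core's fallback, now passing sb_dev instead of a cookie. */
static u16 sketch_select_queue(struct net_device *dev, struct sk_buff *skb,
                               struct net_device *sb_dev,
                               select_queue_fallback_t fallback)
{
        if (skb_rx_queue_recorded(skb))
                return skb_get_rx_queue(skb);

        return fallback(dev, skb, sb_dev);
}
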
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index be198cc0b10c..f5ad12c10934 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_dma_addr)
return -ENOMEM;
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_dma_addr)
return -ENOMEM;
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_skbuff)
return -ENOMEM;
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_skbuff)
return -ENOMEM;
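
Note: the GFP_ATOMIC-to-GFP_KERNEL switch above reflects allocation context: pcnet32_alloc_ring() runs in process context where sleeping is allowed, so there is no need to draw on the atomic emergency reserves. A minimal sketch of the distinction, with illustrative names:

#include <linux/slab.h>

/* Sketch only: GFP_KERNEL may sleep to reclaim memory and should be
 * the default; GFP_ATOMIC never sleeps and is reserved for contexts
 * where sleeping is forbidden (IRQ handlers, spinlocked sections). */
static void *alloc_ring_table(size_t entries, size_t entry_size,
                              bool atomic_ctx)
{
        return kcalloc(entries, entry_size,
                       atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}
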
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index cc1e4f820e64..533094233659 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct page *pages = NULL;
dma_addr_t pages_dma;
gfp_t gfp;
- int order, ret;
+ int order;
again:
order = alloc_order;
@@ -316,10 +316,9 @@ again:
/* Map the pages */
pages_dma = dma_map_page(pdata->dev, pages, 0,
PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
put_page(pages);
- return ret;
+ return -ENOMEM;
}
pa->pages = pages;
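
Note: the xgbe change also shows that dma_mapping_error() returns a nonzero indicator rather than an errno, so the caller supplies its own error code (-ENOMEM here). A minimal sketch of the map-and-check idiom, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch only: map a page for device DMA and fail cleanly. */
static int sketch_map_rx_page(struct device *dev, struct page *page,
                              unsigned int len, dma_addr_t *dma)
{
        *dma = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                put_page(page);         /* drop our reference on failure */
                return -ENOMEM;         /* pick an errno; the helper's
                                         * return value is not one */
        }

        return 0;
}
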
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index f2d8063a2cef..08c9fa6ca71f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
+#include "aq_vec.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
return aq_nic_update_interrupt_moderation_settings(aq_nic);
}
+static int aq_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
+ return -EOPNOTSUPP;
+
+ if (netif_running(ndev))
+ return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+
+ return 0;
+}
+
+static void aq_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ pause->autoneg = 0;
+
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ pause->rx_pause = 1;
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ pause->tx_pause = 1;
+}
+
+static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+
+ if (pause->tx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+
+ err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+
+ return err;
+}
+
+static void aq_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = aq_nic_cfg->rxds;
+ ring->tx_pending = aq_nic_cfg->txds;
+
+ ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+}
+
+static int aq_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ int err = 0;
+ bool ndev_running = false;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+ err = -EOPNOTSUPP;
+ goto err_exit;
+ }
+
+ if (netif_running(ndev)) {
+ ndev_running = true;
+ dev_close(ndev);
+ }
+
+ aq_nic_free_vectors(aq_nic);
+
+ aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
+ aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+
+ aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
+ aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ aq_nic->aq_vecs++) {
+ aq_nic->aq_vec[aq_nic->aq_vecs] =
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+ if (ndev_running)
+ err = dev_open(ndev);
+
+err_exit:
+ return err;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
@@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .nway_reset = aq_ethtool_nway_reset,
+ .get_ringparam = aq_get_ringparam,
+ .set_ringparam = aq_set_ringparam,
+ .get_pauseparam = aq_ethtool_get_pauseparam,
+ .set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
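
Note: aq_set_ringparam() above shapes the requested descriptor counts by clamping them into the hardware's [min, max] window and rounding to the descriptor multiple. A compact sketch of that shaping step, assuming the names introduced by this diff (AQ_HW_RXD_MULTIPLE and the 8184 maxima, which are themselves multiples of 8):

#include <linux/kernel.h>

/* Sketch only: clamp a requested ring size into [lo, hi], then round
 * up to the hardware descriptor multiple.  Because hi is itself a
 * multiple here, ALIGN() cannot push the result past the maximum. */
static u32 shape_ring_size(u32 requested, u32 lo, u32 hi, u32 multiple)
{
        return ALIGN(clamp(requested, lo, hi), multiple);
}
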
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 2c6ebd91a9f2..5c00671f248d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -24,8 +24,10 @@ struct aq_hw_caps_s {
u64 link_speed_msk;
unsigned int hw_priv_flags;
u32 media_type;
- u32 rxds;
- u32 txds;
+ u32 rxds_max;
+ u32 txds_max;
+ u32 rxds_min;
+ u32 txds_min;
u32 txhwb_alignment;
u32 irq_mask;
u32 vecs;
@@ -98,6 +100,9 @@ struct aq_stats_s {
#define AQ_HW_MEDIA_TYPE_TP 1U
#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+#define AQ_HW_TXD_MULTIPLE 8U
+#define AQ_HW_RXD_MULTIPLE 8U
+
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
struct aq_hw_s {
@@ -199,25 +204,30 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
- int (*hw_deinit)(struct aq_hw_s *self);
-
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
struct aq_fw_ops {
int (*init)(struct aq_hw_s *self);
+ int (*deinit)(struct aq_hw_s *self);
+
int (*reset)(struct aq_hw_s *self);
+ int (*renegotiate)(struct aq_hw_s *self);
+
int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
- int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+ int (*set_state)(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
int (*update_link_status)(struct aq_hw_s *self);
int (*update_stats)(struct aq_hw_s *self);
+
+ int (*set_flow_control)(struct aq_hw_s *self);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7a22d0257e04..26dc6782b475 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
- cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
- cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@ -768,10 +768,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control)
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
else
@@ -886,7 +890,7 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
+ (void)self->aq_fw_ops->deinit(self->aq_hw);
} else {
(void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 8cc6abadc03b..97addfa6f895 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -19,29 +19,31 @@
#include "hw_atl_a0_internal.h"
#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_A0_RSS_MAX, \
- .tcs = HW_ATL_A0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_A0_RXD_SIZE, \
- .rxds = 248U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_A0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_A0_TX_RINGS, \
- .rx_rings = HW_ATL_A0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_RXCSUM | \
- NETIF_F_SG | \
- NETIF_F_TSO, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds_max = HW_ATL_A0_MAX_RXD, \
+ .rxds_min = HW_ATL_A0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds_max = HW_ATL_A0_MAX_TXD, \
+ .txds_min = HW_ATL_A0_MIN_RXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
.hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_A0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
@@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_a0_hw_reset,
.hw_start = hw_atl_a0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1d8855558d74..3c94cff57876 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,4 +88,12 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+#define HW_ATL_A0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_A0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_A0_MAX_RXD 8184U
+#define HW_ATL_A0_MAX_TXD 8184U
+
#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 3bdab972420b..1d44a386e7d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -20,30 +20,32 @@
#include "hw_atl_llh_internal.h"
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_B0_RSS_MAX, \
- .tcs = HW_ATL_B0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_B0_RXD_SIZE, \
- .rxds = 4U * 1024U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_B0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_B0_TX_RINGS, \
- .rx_rings = HW_ATL_B0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXCSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_SG | \
- NETIF_F_TSO | \
- NETIF_F_LRO, \
- .hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_B0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds_max = HW_ATL_B0_MAX_RXD, \
+ .rxds_min = HW_ATL_B0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds_max = HW_ATL_B0_MAX_TXD, \
+ .txds_min = HW_ATL_B0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
@@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_b0_hw_reset,
.hw_start = hw_atl_b0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 405d1455c222..28568f5fa74b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,6 +142,14 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
+#define HW_ATL_B0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index e652d86b87d4..c965e65d07db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -30,10 +30,11 @@
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
-#define HW_ATL_MPI_STATE_MSK 0x00FFU
-#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
-#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
@@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ val = val & ~HW_ATL_MPI_SPEED_MSK;
+ val |= speed << HW_ATL_MPI_SPEED_SHIFT;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
-void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state,
- u32 speed)
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
{
int err = 0;
u32 transaction_id = 0;
struct hw_aq_atl_utils_mbox_header mbox;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
}
+ /* On interface DEINIT we disable DW (raise bit)
+ * Otherwise enable DW (clear bit)
+ */
+ if (state == MPI_DEINIT || state == MPI_POWER)
+ val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+ else
+ val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
- (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
-
-err_exit:;
-}
-
-static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state)
-{
- u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ /* Set new state bits */
+ val = val & ~HW_ATL_MPI_STATE_MSK;
+ val |= state & HW_ATL_MPI_STATE_MSK;
- val = state | (val & HW_ATL_MPI_SPEED_MSK);
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
- return 0;
+err_exit:
+ return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
@@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
*p = chip_features;
}
-int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
- hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
return 0;
}
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
unsigned int power_state)
{
- hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
return 0;
}
@@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
.reset = NULL,
.get_mac_permanent = hw_atl_utils_get_mac_permanent,
.set_link_speed = hw_atl_utils_mpi_set_speed,
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .set_flow_control = NULL,
};
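
Note: the hw_atl_utils.c rework above replaces a combined speed/state write with explicit read-modify-write so each field of the MPI control register can be updated independently. The generic pattern, as a sketch built on the driver's own register accessors (prototypes in aq_hw.h):

#include "aq_hw.h"

/* Sketch only: read-modify-write one field of a packed control
 * register: clear the field via its mask, then OR in the shifted
 * new value, leaving the other fields untouched. */
static void rmw_reg_field(struct aq_hw_s *self, u32 reg, u32 msk,
                          u32 shift, u32 field_val)
{
        u32 val = aq_hw_read_reg(self, reg);

        val &= ~msk;
        val |= (field_val << shift) & msk;
        aq_hw_write_reg(self, reg, val);
}
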
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index cd8f18f39c61..b875590efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi {
CAPS_HI_TRANSACTION_ID,
};
+enum hw_atl_fw2x_ctrl {
+ CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED2,
+ CTRL_RESERVED3,
+ CTRL_PAUSE,
+ CTRL_ASYMMETRIC_PAUSE,
+ CTRL_RESERVED4,
+ CTRL_RESERVED5,
+ CTRL_RESERVED6,
+ CTRL_1GBASET_FD_EEE,
+ CTRL_2P5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE,
+ CTRL_10GBASET_FD_EEE,
+ CTRL_THERMAL_SHUTDOWN,
+ CTRL_PHY_LOGS,
+ CTRL_EEE_AUTO_DISABLE,
+ CTRL_PFC,
+ CTRL_WAKE_ON_LINK,
+ CTRL_CABLE_DIAG,
+ CTRL_TEMPERATURE,
+ CTRL_DOWNSHIFT,
+ CTRL_PTP_AVB,
+ CTRL_RESERVED7,
+ CTRL_LINK_DROP,
+ CTRL_SLEEP_PROXY,
+ CTRL_WOL,
+ CTRL_MAC_STOP,
+ CTRL_EXT_LOOPBACK,
+ CTRL_INT_LOOPBACK,
+ CTRL_RESERVED8,
+ CTRL_WOL_TIMER,
+ CTRL_STATISTICS,
+ CTRL_FORCE_RECONNECT,
+};
+
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 39cd3a27fe77..e37943760a58 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -28,6 +28,10 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
@@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self)
return err;
}
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+ int err = aq_fw2x_set_link_speed(self, 0);
+
+ if (!err)
+ err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+ return err;
+}
+
static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
{
enum hw_atl_fw2x_rate rate = 0;
@@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
+static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+{
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ *mpi_state |= BIT(CAPS_HI_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+}
+
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- /* No explicit state in 2x fw */
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ switch (state) {
+ case MPI_INIT:
+ mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ break;
+ case MPI_DEINIT:
+ mpi_state |= BIT(CAPS_HI_LINK_DROP);
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
return 0;
}
@@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
+static int aq_fw2x_renegotiate(struct aq_hw_s *self)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ mpi_opts |= BIT(CTRL_FORCE_RECONNECT);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
.reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
.get_mac_permanent = aq_fw2x_get_mac_permanent,
.set_link_speed = aq_fw2x_set_link_speed,
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
+ .set_flow_control = aq_fw2x_set_flow_control,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index a445de6837a6..94efc6477bdc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -12,8 +12,8 @@
#define NIC_MAJOR_DRIVER_VERSION 2
#define NIC_MINOR_DRIVER_VERSION 0
-#define NIC_BUILD_DRIVER_VERSION 2
-#define NIC_REVISION_DRIVER_VERSION 1
+#define NIC_BUILD_DRIVER_VERSION 3
+#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 5e5022fa1d04..6d3221134927 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx)
struct alx_hw *hw = &alx->hw;
unsigned long flags;
int old_speed;
- u8 old_duplex;
int err;
/* clear PHY internal interrupt status, otherwise the main
@@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx)
alx_clear_phy_intr(hw);
old_speed = hw->link_speed;
- old_duplex = hw->duplex;
err = alx_read_phy_link(hw);
if (err < 0)
goto reset;
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index 8ba7f8ff3434..392f564d8fd4 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -1,5 +1,6 @@
config NET_VENDOR_AURORA
bool "Aurora VLSI devices"
+ default y
help
If you have a network (Ethernet) device belonging to this class,
say Y.
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index e94159507847..c8d1f8fa4713 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
again:
do {
- struct nb8800_rx_buf *rxb;
unsigned int len;
next = (last + 1) % RX_DESC_COUNT;
- rxb = &priv->rx_bufs[next];
rxd = &priv->rx_descs[next];
if (!rxd->report)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 4c3bfde6e8de..c1d3ee9baf7e 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,7 +61,7 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
select MII
select PHYLIB
select FIXED_PHY
@@ -181,7 +181,7 @@ config BGMAC_PLATFORM
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
- depends on OF
+ depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
select MII
select PHYLIB
@@ -230,4 +230,12 @@ config BNXT_DCB
If unsure, say N.
+config BNXT_HWMON
+ bool "Broadcom NetXtreme-C/E HWMON support"
+ default y
+ depends on BNXT && HWMON && !(BNXT=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
+ devices via the hwmon sysfs interface.
+
endif # NET_VENDOR_BROADCOM
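One note on the new BNXT_HWMON dependency expression: `depends on BNXT && HWMON && !(BNXT=y && HWMON=m)` rules out the one link-breaking combination, a built-in bnxt calling into a modular hwmon core; bnxt=y/hwmon=y, bnxt=m/hwmon=y and bnxt=m/hwmon=m all remain selectable.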
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a1f60f89e059..147045757b10 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -521,7 +521,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
if (!(priv->wolopts & WAKE_MAGICSECURE))
@@ -539,7 +539,7 @@ static int bcm_sysport_set_wol(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
@@ -1041,17 +1041,45 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
+{
+ u32 reg, bit;
+
+ reg = umac_readl(priv, UMAC_MPD_CTRL);
+ if (enable)
+ reg |= MPD_EN;
+ else
+ reg &= ~MPD_EN;
+ umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+ if (priv->is_lite)
+ bit = RBUF_ACPI_EN_LITE;
+ else
+ bit = RBUF_ACPI_EN;
+
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ if (enable)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
/* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD);
+ intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
+
+ /* Disable RXCHK, active filters and Broadcom tag matching */
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ reg &= ~((RXCHK_BRCM_TAG_MATCH_MASK <<
+ RXCHK_BRCM_TAG_MATCH_SHIFT) | RXCHK_EN | RXCHK_BRCM_TAG_EN);
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
/* Clear the MagicPacket detection logic */
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1077,6 +1105,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
+ u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1102,9 +1131,14 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
+ if (priv->irq0_stat & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
}
if (!priv->is_lite)
@@ -2090,6 +2124,132 @@ static int bcm_sysport_stop(struct net_device *dev)
return 0;
}
+static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
+ u64 location)
+{
+ unsigned int index;
+ u32 reg;
+
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+ reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
+ reg &= RXCHK_BRCM_TAG_CID_MASK;
+ if (reg == location)
+ return index;
+ }
+
+ return -EINVAL;
+}
+
+static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ int index;
+
+ /* This is not a rule that we know about */
+ index = bcm_sysport_rule_find(priv, nfc->fs.location);
+ if (index < 0)
+ return -EOPNOTSUPP;
+
+ nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
+
+ return 0;
+}
+
+static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ unsigned int index;
+ u32 reg;
+
+ /* We cannot match locations greater than what the classification ID
+ * permits (256 entries)
+ */
+ if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
+ return -E2BIG;
+
+ /* We cannot support flows that are not destined for a wake-up */
+ if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
+ return -EOPNOTSUPP;
+
+ /* All filters are already in use, we cannot match more rules */
+ if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
+ RXCHK_BRCM_TAG_MAX)
+ return -ENOSPC;
+
+ index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
+ if (index >= RXCHK_BRCM_TAG_MAX)
+ return -ENOSPC;
+
+ /* Location is the classification ID, and index is the position
+ * within one of our 8 possible filters to be programmed
+ */
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+ reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
+ reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
+ rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+
+ set_bit(index, priv->filters);
+
+ return 0;
+}
+
+static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
+ u64 location)
+{
+ int index;
+
+ /* This is not a rule that we know about */
+ index = bcm_sysport_rule_find(priv, location);
+ if (index < 0)
+ return -EOPNOTSUPP;
+
+ /* No need to disable this filter if it was enabled; it will
+ * be taken care of at suspend time by bcm_sysport_suspend_to_wol()
+ */
+ clear_bit(index, priv->filters);
+
+ return 0;
+}
+
+static int bcm_sysport_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXCLSRULE:
+ ret = bcm_sysport_rule_get(priv, nfc);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int bcm_sysport_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcm_sysport_rule_set(priv, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcm_sysport_rule_del(priv, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
@@ -2104,10 +2264,12 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_coalesce = bcm_sysport_set_coalesce,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_rxnfc = bcm_sysport_get_rxnfc,
+ .set_rxnfc = bcm_sysport_set_rxnfc,
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2278,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2286,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
return tx_ring->index;
}
@@ -2423,21 +2585,43 @@ static int bcm_sysport_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
struct net_device *ndev = priv->netdev;
unsigned int timeout = 1000;
+ unsigned int index, i = 0;
u32 reg;
/* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg |= MPD_EN;
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= MPD_EN;
reg &= ~PSW_EN;
if (priv->wolopts & WAKE_MAGICSECURE)
reg |= PSW_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ /* Turn on ACPI matching to steal packets from RBUF */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ if (priv->is_lite)
+ reg |= RBUF_ACPI_EN_LITE;
+ else
+ reg |= RBUF_ACPI_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Enable RXCHK, active filters and Broadcom tag matching */
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+ RXCHK_BRCM_TAG_MATCH_SHIFT);
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
+ i++;
+ }
+ reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+ }
+
/* Make sure RBUF entered WoL mode as result */
do {
reg = rbuf_readl(priv, RBUF_STATUS);
@@ -2449,9 +2633,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* Do not leave the UniMAC RBUF matching only MPD packets */
if (!timeout) {
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
return -ETIMEDOUT;
}
@@ -2460,14 +2642,14 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
umac_enable_set(priv, CMD_RX_EN, 1);
/* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+ intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
}
-static int bcm_sysport_suspend(struct device *d)
+static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2529,7 +2711,7 @@ static int bcm_sysport_suspend(struct device *d)
return ret;
}
-static int bcm_sysport_resume(struct device *d)
+static int __maybe_unused bcm_sysport_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2622,7 +2804,6 @@ out_free_tx_rings:
bcm_sysport_fini_tx_ring(priv, i);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
bcm_sysport_suspend, bcm_sysport_resume);
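With the changes above, the eight RXCHK Broadcom-tag filters are exposed through the standard rxnfc ioctls, with RX_CLS_FLOW_WAKE as the only accepted action and fs.location carrying the classification ID to match rather than a table slot. A hedged userspace sketch of inserting one wake filter over SIOCETHTOOL follows; the interface name and classification ID are placeholders, and RX_CLS_FLOW_WAKE needs a uapi ethtool.h new enough to define it (a fallback definition is guarded below).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#ifndef RX_CLS_FLOW_WAKE
#define RX_CLS_FLOW_WAKE 0xfffffffffffffffeULL
#endif

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	/* For this driver, fs.location is the Broadcom tag
	 * classification ID to match (0..255).
	 */
	nfc.fs.location = 42;
	nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}

Note that bcm_sysport_rule_set() only records the filter in priv->filters; the hardware is actually armed in bcm_sysport_suspend_to_wol(), which is also why deletion is just a clear_bit().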
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index cf440b91fd04..046c6c1d97fd 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -11,6 +11,7 @@
#ifndef __BCM_SYSPORT_H
#define __BCM_SYSPORT_H
+#include <linux/bitmap.h>
#include <linux/if_vlan.h>
#include <linux/net_dim.h>
@@ -155,14 +156,18 @@ struct bcm_rsb {
#define RXCHK_PARSE_AUTH (1 << 22)
#define RXCHK_BRCM_TAG0 0x04
-#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG(i) ((i) * 0x4 + RXCHK_BRCM_TAG0)
#define RXCHK_BRCM_TAG0_MASK 0x24
-#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * 0x4 + RXCHK_BRCM_TAG0_MASK)
#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
#define RXCHK_ETHERTYPE 0x48
#define RXCHK_BAD_CSUM_CNTR 0x4C
#define RXCHK_OTHER_DISC_CNTR 0x50
+#define RXCHK_BRCM_TAG_MAX 8
+#define RXCHK_BRCM_TAG_CID_SHIFT 16
+#define RXCHK_BRCM_TAG_CID_MASK 0xff
+
/* TXCHCK offsets and defines */
#define SYS_PORT_TXCHK_OFFSET 0x380
#define TXCHK_PKT_RDY_THRESH 0x00
@@ -185,6 +190,7 @@ struct bcm_rsb {
#define RBUF_RSB_SWAP0 (1 << 22)
#define RBUF_RSB_SWAP1 (1 << 23)
#define RBUF_ACPI_EN (1 << 23)
+#define RBUF_ACPI_EN_LITE (1 << 24)
#define RBUF_PKT_RDY_THRESH 0x04
@@ -777,6 +783,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
+ DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
struct bcm_sysport_stats64 stats64;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6ea8e61f96d..4c94d9218bba 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
struct device *dma_dev = bgmac->dma_dev;
int empty_slot;
- bool freed = false;
unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
@@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->dma_addr = 0;
ring->start++;
- freed = true;
}
if (!pkts_compl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index af7b5a4d8ba0..5a727d4729da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c55bbb0..0e508e5defce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 22243c480a05..98d4c5a3ff21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
+ /* else: fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = SPEED_100;
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 57348f2b49a3..71362b7f6040 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
bp->num_queues,
1 + bp->num_cnic_queues);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8baf9d3eb4b1..3f4d2c8da21a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index dc77bfded865..62da46537734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1827,6 +1827,7 @@ get_vf:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
+ /* fall through */
case EVENT_RING_OPCODE_VF_FLR:
/* Do nothing for now */
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4394c1162be4..8bb1e38b1681 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -51,6 +51,8 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -1115,7 +1117,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3)
+ if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1727,8 +1729,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
- /* fall thru */
}
+ /* fall through */
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -3012,13 +3014,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
- if (bp->hwrm_dbg_resp_addr) {
- dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
- bp->hwrm_dbg_resp_addr,
- bp->hwrm_dbg_resp_dma_addr);
-
- bp->hwrm_dbg_resp_addr = NULL;
- }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3030,12 +3025,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
GFP_KERNEL);
if (!bp->hwrm_cmd_resp_addr)
return -ENOMEM;
- bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
- HWRM_DBG_REG_BUF_SIZE,
- &bp->hwrm_dbg_resp_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_dbg_resp_addr)
- netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
return 0;
}
@@ -3458,7 +3447,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
memcpy(short_cmd_req, req, msg_len);
@@ -3651,7 +3640,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -3689,7 +3680,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ else if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
@@ -3994,6 +3993,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
max_rings = bp->rx_nr_rings - 1;
@@ -4591,7 +4591,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
u16 cp, stats;
hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
@@ -4637,7 +4637,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->fid = cpu_to_le16(0xffff);
enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
req->num_tx_rings = cpu_to_le16(tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
@@ -4710,7 +4710,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ if (!BNXT_NEW_RM(bp)) {
bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
}
@@ -4770,7 +4770,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
return true;
@@ -4806,7 +4806,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
tx = hw_resc->resv_tx_rings;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
rx = hw_resc->resv_rx_rings;
cp = hw_resc->resv_cp_rings;
grp = hw_resc->resv_hw_ring_grps;
@@ -4850,7 +4850,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
u32 flags;
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return 0;
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
@@ -4879,7 +4879,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
@@ -5101,9 +5101,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
- bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -5175,7 +5175,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
pf->vf_resv_strategy =
le16_to_cpu(resp->vf_reservation_strategy);
- if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
}
hwrm_func_resc_qcaps_exit:
@@ -5261,7 +5261,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code >= 0x10803) {
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
- bp->flags |= BNXT_FLAG_NEW_RM;
+ bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
}
return 0;
}
@@ -5281,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int rc = 0;
struct hwrm_queue_qportcfg_input req = {0};
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
- u8 i, *qptr;
+ u8 i, j, *qptr;
+ bool no_rdma;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
@@ -5299,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bp->max_tc; i++) {
+ bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_profile = *qptr++;
+ bp->tc_to_qidx[j] = j;
+ if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
+ (no_rdma && BNXT_PF(bp)))
+ j++;
+ }
+ bp->max_tc = max_t(u8, j, 1);
+
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
if (bp->max_lltc > bp->max_tc)
bp->max_lltc = bp->max_tc;
- qptr = &resp->queue_id0;
- for (i = 0; i < bp->max_tc; i++) {
- bp->q_info[i].queue_id = *qptr++;
- bp->q_info[i].queue_profile = *qptr++;
- bp->tc_to_qidx[i] = i;
- }
-
qportcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5364,7 +5370,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
- bp->flags |= BNXT_FLAG_SHORT_CMD;
+ bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5933,7 +5939,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
- if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+ if (!BNXT_NEW_RM(bp) || avail_msix >= num)
return avail_msix;
if (max_irq < total_req) {
@@ -5946,7 +5952,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
static int bnxt_get_num_msix(struct bnxt *bp)
{
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return bnxt_get_max_func_irqs(bp);
return bnxt_cp_rings_in_use(bp);
@@ -6069,8 +6075,7 @@ int bnxt_reserve_rings(struct bnxt *bp)
netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
return rc;
}
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
- (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
@@ -6350,6 +6355,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
+ if (bp->test_info)
+ bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -6646,6 +6655,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+ struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_drv_if_change_input req = {0};
+ bool resc_reinit = false;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+ if (up)
+ req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
+ resc_reinit = true;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (up && resc_reinit && BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ }
+ return rc;
+}
+
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -6755,6 +6797,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
+#ifdef CONFIG_BNXT_HWMON
+static ssize_t bnxt_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct hwrm_temp_monitor_query_input req = {0};
+ struct hwrm_temp_monitor_query_output *resp;
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u32 temp = 0;
+
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ temp = resp->temp * 1000; /* convert to millidegrees C for hwmon */
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return sprintf(buf, "%u\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+
+static struct attribute *bnxt_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bnxt);
+
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ bnxt_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
+#else
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+}
+#endif
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6907,8 +7005,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
mutex_unlock(&bp->link_lock);
- if (rc)
+ if (rc) {
netdev_warn(bp->dev, "failed to update phy settings\n");
+ if (BNXT_SINGLE_PF(bp)) {
+ bp->link_info.phy_retry = true;
+ bp->link_info.phy_retry_expires =
+ jiffies + 5 * HZ;
+ }
+ }
}
if (irq_re_init)
@@ -6994,8 +7098,16 @@ void bnxt_half_close_nic(struct bnxt *bp)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ bnxt_hwrm_if_change(bp, true);
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc)
+ bnxt_hwrm_if_change(bp, false);
+
+ bnxt_hwmon_open(bp);
- return __bnxt_open_nic(bp, true, true);
+ return rc;
}
static bool bnxt_drv_busy(struct bnxt *bp)
@@ -7057,8 +7169,10 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
+ bnxt_hwrm_if_change(bp, false);
return 0;
}
@@ -7308,7 +7422,7 @@ skip_uc:
static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
- if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+ if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
/* No minimum rings were provisioned by the PF. Don't
@@ -7358,7 +7472,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
return false;
}
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return true;
if (vnics == bp->hw_resc.resv_vnics)
@@ -7592,6 +7706,16 @@ static void bnxt_timer(struct timer_list *t)
set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
+
+ if (bp->link_info.phy_retry) {
+ if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
+ bp->link_info.phy_retry = false;
+ netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
+ } else {
+ set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -7679,6 +7803,19 @@ static void bnxt_sp_task(struct work_struct *work)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
+ if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
+ int rc;
+
+ mutex_lock(&bp->link_lock);
+ rc = bnxt_update_phy_setting(bp);
+ mutex_unlock(&bp->link_lock);
+ if (rc) {
+ netdev_warn(bp->dev, "update phy settings retry failed\n");
+ } else {
+ bp->link_info.phy_retry = false;
+ netdev_info(bp->dev, "update phy settings retry succeeded\n");
+ }
+ }
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
mutex_lock(&bp->link_lock);
bnxt_get_port_module_status(bp);
@@ -7731,7 +7868,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
cp += bnxt_get_ulp_msix_num(bp);
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
vnics);
@@ -7991,7 +8128,7 @@ static int bnxt_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
- bp, bp);
+ bp, bp, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
return 0;
@@ -8740,7 +8877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
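With CONFIG_BNXT_HWMON=y, bnxt_hwmon_open() registers a hwmon device named after DRV_MODULE_NAME ("bnxt_en") whose temp1_input attribute returns the sensor reading in millidegrees Celsius, per the standard hwmon ABI. A small host-side sketch that locates it by scanning /sys/class/hwmon; only the generic hwmon sysfs layout is assumed.

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[512], line[64];
	struct dirent *de;
	DIR *dir;
	FILE *f;

	dir = opendir("/sys/class/hwmon");
	if (!dir) {
		perror("opendir");
		return 1;
	}
	while ((de = readdir(dir))) {
		if (de->d_name[0] == '.')
			continue;
		/* "name" holds the string passed to
		 * hwmon_device_register_with_groups(), i.e. "bnxt_en".
		 */
		snprintf(path, sizeof(path),
			 "/sys/class/hwmon/%s/name", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (!fgets(line, sizeof(line), f)) {
			fclose(f);
			continue;
		}
		fclose(f);
		if (strncmp(line, "bnxt_en", 7))
			continue;
		snprintf(path, sizeof(path),
			 "/sys/class/hwmon/%s/temp1_input", de->d_name);
		f = fopen(path, "r");
		if (f && fgets(line, sizeof(line), f))
			printf("%s: %s", de->d_name, line); /* mdeg C */
		if (f)
			fclose(f);
	}
	closedir(dir);
	return 0;
}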
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 91575ef97c8c..fefa011320e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.1"
+#define DRV_MODULE_VERSION "1.9.2"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 9
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+#define TPA_START_IS_IPV6(rx_tpa_start) \
+ (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -862,6 +866,7 @@ struct bnxt_pf_info {
u8 vf_resv_strategy;
#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
+#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -959,6 +964,9 @@ struct bnxt_link_info {
u16 advertising; /* user adv setting */
bool force_link_chng;
+ bool phy_retry;
+ unsigned long phy_retry_expires;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -990,6 +998,8 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
+ u8 flags;
+#define BNXT_TEST_FL_EXT_LPBK 0x1
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1134,7 +1144,6 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_DCB_ENABLED 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1163,15 +1172,11 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
- #define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
- #define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
- #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1276,10 +1281,19 @@ struct bnxt {
struct ieee_ets *ieee_ets;
u8 dcbx_cap;
u8 default_pri;
+ u8 max_dscp_value;
#endif /* CONFIG_BNXT_DCB */
u32 msg_enable;
+ u32 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD 0x00000001
+ #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
+ #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
+ #define BNXT_FW_CAP_NEW_RM 0x00000008
+ #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+
+#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
@@ -1287,9 +1301,6 @@ struct bnxt {
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
- void *hwrm_dbg_resp_addr;
- dma_addr_t hwrm_dbg_resp_dma_addr;
-#define HWRM_DBG_REG_BUF_SIZE 128
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
@@ -1345,6 +1356,7 @@ struct bnxt {
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+#define BNXT_UPDATE_PHY_SP_EVENT 16
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1400,6 +1412,7 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
struct dentry *debugfs_dim;
+ struct device *hwmon_dev;
};
#define BNXT_RX_STATS_OFFSET(counter) \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
new file mode 100644
index 000000000000..09c22f8fe399
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -0,0 +1,66 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2018 Broadcom Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_COREDUMP_H
+#define BNXT_COREDUMP_H
+
+struct bnxt_coredump_segment_hdr {
+ __u8 signature[4];
+ __le32 component_id;
+ __le32 segment_id;
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __le16 function_id;
+ __le32 offset;
+ __le32 length;
+ __le32 status;
+ __le32 duration;
+ __le32 data_offset;
+ __le32 instance;
+ __le32 rsvd[5];
+};
+
+struct bnxt_coredump_record {
+ __u8 signature[4];
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __u8 asic_state;
+ __u8 rsvd0[5];
+ char system_name[32];
+ __le16 year;
+ __le16 month;
+ __le16 day;
+ __le16 hour;
+ __le16 minute;
+ __le16 second;
+ __le16 utc_bias;
+ __le16 rsvd1;
+ char commandline[256];
+ __le32 total_segments;
+ __le32 os_ver_major;
+ __le32 os_ver_minor;
+ __le32 rsvd2;
+ char os_name[32];
+ __le16 end_year;
+ __le16 end_month;
+ __le16 end_day;
+ __le16 end_hour;
+ __le16 end_minute;
+ __le16 end_second;
+ __le16 end_utc_bias;
+ __le32 asic_id1;
+ __le32 asic_id2;
+ __le32 coredump_status;
+ __u8 ioctl_low_version;
+ __u8 ioctl_high_version;
+ __le16 rsvd3[313];
+};
+#endif
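Taken together with the retrieve loop in bnxt_ethtool.c, the implied file layout is a run of "sEgM"-signed segment headers, each immediately followed by its payload at data_offset, with a single "cOrE"-signed bnxt_coredump_record trailing the whole dump. Below is a hedged walker for a dump already read into memory; it assumes a little-endian host and abbreviates the structures above to just the fields it needs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Abbreviated copy of bnxt_coredump_segment_hdr; on-disk values are
 * little-endian, which this sketch assumes matches the host.
 */
struct seg_hdr {
	uint8_t  signature[4];   /* "sEgM" */
	uint32_t component_id;
	uint32_t segment_id;
	uint32_t flags;
	uint8_t  low_version, high_version;
	uint16_t function_id;
	uint32_t offset;
	uint32_t length;         /* payload bytes after data_offset */
	uint32_t status;
	uint32_t duration;
	uint32_t data_offset;    /* payload start, relative to header */
	uint32_t instance;
	uint32_t rsvd[5];
};

/* Walk every "sEgM" segment; the "cOrE" record trailer stops us. */
static void walk(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct seg_hdr) <= len) {
		const struct seg_hdr *h = (const void *)(buf + off);

		if (memcmp(h->signature, "sEgM", 4))
			break;   /* reached the coredump record */
		printf("component %u segment %u: %u bytes\n",
		       h->component_id, h->segment_id, h->length);
		off += h->data_offset + h->length;
	}
}

int main(int argc, char **argv)
{
	static uint8_t buf[1 << 20];
	size_t n;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	walk(buf, n);
	return 0;
}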
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index d5bc72cecde3..ddc98c359488 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -385,6 +385,61 @@ set_app_exit:
return rc;
}
+static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_dscp_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+ if (bp->max_dscp_value < 0x3f)
+ bp->max_dscp_value = 0;
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_queue_dscp2pri_cfg_input req = {0};
+ struct bnxt_dscp2pri_entry *dscp2pri;
+ dma_addr_t mapping;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
+ dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
+ &mapping, GFP_KERNEL);
+ if (!dscp2pri)
+ return -ENOMEM;
+
+ req.src_data_addr = cpu_to_le64(mapping);
+ dscp2pri->dscp = app->protocol;
+ if (add)
+ dscp2pri->mask = 0x3f;
+ else
+ dscp2pri->mask = 0;
+ dscp2pri->pri = app->priority;
+ req.entry_cnt = cpu_to_le16(1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
+ mapping);
+ return rc;
+}
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
@@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
return rc;
}
+static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
+{
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
+ if (!bp->max_dscp_value)
+ return -ENOTSUPP;
+ if (app->protocol > bp->max_dscp_value)
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = -EINVAL;
+ int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_setapp(dev, app);
if (rc)
return rc;
@@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);
+
return rc;
}
@@ -582,6 +655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_delapp(dev, app);
if (rc)
return rc;
@@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);
+
return rc;
}
@@ -610,7 +690,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
- if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
return 1;
/* only support IEEE */
@@ -642,10 +722,11 @@ void bnxt_dcb_init(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10501)
return;
+ bnxt_hwrm_queue_dscp_qcaps(bp);
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
- else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
+ else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
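For the DSCP pieces above: HWRM_QUEUE_DSCP_QCAPS yields max_dscp_value = (1 << num_dscp_bits) - 1, so 6 DSCP bits give 63 (0x3f) and anything narrower is collapsed to 0, i.e. treated as unsupported. A compact sketch of the same acceptance rule applied to an IEEE DSCP app entry follows; the struct layout is copied locally so the snippet stands alone.

#include <stdint.h>
#include <stdio.h>

#define IEEE_8021QAZ_APP_SEL_DSCP 5

/* Local copy of struct dcb_app: for the DSCP selector, "protocol"
 * carries the DSCP codepoint being mapped to "priority".
 */
struct dcb_app {
	uint8_t  selector;
	uint8_t  priority;
	uint16_t protocol;
};

/* Mirror of bnxt_dcbnl_ieee_dscp_app_prep(): a DSCP entry is valid
 * only if the NIC reported full 6-bit DSCP support and the codepoint
 * fits within it.
 */
static int dscp_app_ok(uint8_t max_dscp_value, const struct dcb_app *app)
{
	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
		return 1;   /* not a DSCP entry, nothing to check */
	if (!max_dscp_value)
		return 0;   /* firmware lacks 6-bit DSCP support */
	return app->protocol <= max_dscp_value;
}

int main(void)
{
	/* DSCP 46 (EF) -> priority 3 */
	struct dcb_app app = { IEEE_8021QAZ_APP_SEL_DSCP, 3, 46 };

	printf("%s\n", dscp_app_ok(0x3f, &app) ? "accepted" : "rejected");
	return 0;
}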
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 69efde785f23..6eed231de565 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -33,10 +33,20 @@ struct bnxt_cos2bw_cfg {
u8 unused;
};
+struct bnxt_dscp2pri_entry {
+ u8 dscp;
+ u8 mask;
+ u8 pri;
+};
+
#define BNXT_LLQ(q_profile) \
((q_profile) == \
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
+#define BNXT_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
void bnxt_dcb_init(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 402fa32f7a88..f3b9fbcc705b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+static const struct bnxt_dl_nvm_param nvm_params[] = {
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+ BNXT_NVM_SHARED_CFG, 1},
+};
+
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ int msg_len, union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ void *data_addr = NULL, *buf = NULL;
+ struct bnxt_dl_nvm_param nvm_param;
+ int bytesize, idx = 0, rc, i;
+ dma_addr_t data_dma_addr;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp))
+ return -EPERM;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ if (nvm_params[i].id == param_id) {
+ nvm_param = nvm_params[i];
+ break;
+ }
+ }
+
+ /* An unknown id would leave nvm_param uninitialized */
+ if (i == ARRAY_SIZE(nvm_params))
+ return -EOPNOTSUPP;
+
+ if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ idx = bp->pf.port_id;
+ else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+ bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+
+ data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
+ if (!data_addr)
+ return -ENOMEM;
+
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
+ req->data_len = cpu_to_le16(nvm_param.num_bits);
+ req->option_num = cpu_to_le16(nvm_param.offset);
+ req->index_0 = cpu_to_le16(idx);
+ if (idx)
+ req->dimensions = cpu_to_le16(1);
+
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+ memcpy(data_addr, buf, bytesize);
+
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+ memcpy(buf, data_addr, bytesize);
+
+ dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+ if (rc)
+ return -EIO;
+ return 0;
+}
+
+static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_set_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static const struct devlink_param bnxt_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+};
+
int bnxt_dl_register(struct bnxt *bp)
{
struct devlink *dl;
int rc;
- if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
- return 0;
-
- if (bp->hwrm_spec_code < 0x10803) {
- netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+ if (bp->hwrm_spec_code < 0x10600) {
+ netdev_warn(bp->dev, "Firmware does not support NVM params\n");
return -ENOTSUPP;
}
@@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp)
}
bnxt_link_bp_to_dl(bp, dl);
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+ /* Add switchdev eswitch mode setting, if SRIOV supported */
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
+ bp->hwrm_spec_code >= 0x10803)
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
- bnxt_link_bp_to_dl(bp, NULL);
- devlink_free(dl);
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
- return rc;
+ goto err_dl_free;
+ }
+
+ rc = devlink_params_register(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
+ rc);
+ goto err_dl_unreg;
}
return 0;
+
+err_dl_unreg:
+ devlink_unregister(dl);
+err_dl_free:
+ bnxt_link_bp_to_dl(bp, NULL);
+ devlink_free(dl);
+ return rc;
}
void bnxt_dl_unregister(struct bnxt *bp)
@@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ devlink_params_unregister(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
devlink_unregister(dl);
devlink_free(dl);
}
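Usage-wise, the registered ENABLE_SRIOV knob is driven through the generic devlink param interface; with an iproute2 recent enough to know the permanent cmode, something along the lines of `devlink dev param set pci/0000:af:00.0 name enable_sriov value true cmode permanent` (the PCI address is a placeholder) should end up in bnxt_dl_nvm_param_set() and, from there, in an HWRM_NVM_SET_VARIABLE targeting NVM option 401 (NVM_OFF_ENABLE_SRIOV).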
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index e92a35d8b642..2f68dc048390 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_ENABLE_SRIOV 401
+
+enum bnxt_nvm_dir_type {
+ BNXT_NVM_SHARED_CFG = 40,
+ BNXT_NVM_PORT_CFG,
+ BNXT_NVM_FUNC_CFG,
+};
+
+struct bnxt_dl_nvm_param {
+ u16 id;
+ u16 offset;
+ u16 dir_type;
+ u16 num_bits;
+};
+
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7270c8b0cef3..e52d7af3ab3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -16,12 +16,15 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
@@ -112,6 +115,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_MAX_STATS_COAL_TICKS);
stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
bp->stats_coal_ticks = stats_ticks;
+ if (bp->stats_coal_ticks)
+ bp->current_interval =
+ bp->stats_coal_ticks * HZ / 1000000;
+ else
+ bp->current_interval = BNXT_TIMER_INTERVAL;
update_stats = true;
}
@@ -162,7 +170,7 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
- BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+ BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
@@ -205,9 +213,9 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
- BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+ BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
- BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+ BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
@@ -463,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -2392,7 +2400,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -2400,7 +2408,10 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
if (enable) {
bnxt_disable_an_for_lpbk(bp, &req);
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ if (ext)
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+ else
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
} else {
req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
}
@@ -2533,15 +2544,17 @@ static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
return rc;
}
-#define BNXT_DRV_TESTS 3
+#define BNXT_DRV_TESTS 4
#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
-#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u64 *buf)
{
struct bnxt *bp = netdev_priv(dev);
+ bool do_ext_lpbk = false;
bool offline = false;
u8 test_results = 0;
u8 test_mask = 0;
@@ -2555,6 +2568,10 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
+ if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
+ (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+ do_ext_lpbk = true;
+
if (etest->flags & ETH_TEST_FL_OFFLINE) {
if (bp->pf.active_vfs) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -2595,13 +2612,22 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
- bnxt_hwrm_phy_loopback(bp, true);
+ bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
if (bnxt_run_loopback(bp)) {
buf[BNXT_PHYLPBK_TEST_IDX] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- bnxt_hwrm_phy_loopback(bp, false);
+ if (do_ext_lpbk) {
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ bnxt_hwrm_phy_loopback(bp, true, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+ bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
bnxt_open_nic(bp, false, true);
}
@@ -2662,6 +2688,331 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return rc;
}
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ u16 seq = 0, len, segs_off;
+ void *resp = cmn_resp;
+ dma_addr_t dma_handle;
+ int rc, off = 0;
+ void *dma_buf;
+
+ dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
+ GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf)
+ memcpy(info->dest_buf + off, dma_buf, len);
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct hwrm_dbg_coredump_list_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
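+/* Ask the firmware to start collecting the given coredump segment. */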
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
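+/* Retrieve one coredump segment, optionally copying it into buf at
+ * the given offset, and return its length in *seg_len.
+ */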
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf)
+ info.dest_buf = buf + offset;
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
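+/* Build the per-segment header that precedes each segment's data in
+ * the coredump output buffer.
+ */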
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ } else {
+ /* For the hwrm_ver_get response, component id = 2
+ * and segment id = 0.
+ */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
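+/* Build the coredump record appended at the end of the dump; it
+ * carries start/end timestamps, OS and ASIC identification and the
+ * overall status.
+ */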
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strlcpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year + 1900);
+ record->month = cpu_to_le16(tm.tm_mon + 1);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ strcpy(record->commandline, "ethtool -w");
+ record->total_segments = cpu_to_le32(total_segs);
+
+ sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strlcpy(record->os_name, utsname()->sysname, 32);
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
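+/* Collect the full coredump.  Called twice by ethtool: first with a
+ * NULL buf to compute the required length, then with a real buffer.
+ * Layout: ver_get segment, then each firmware segment (header +
+ * data), then a trailing bnxt_coredump_record.
+ */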
+static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ struct coredump_segment_record *seg_record = NULL;
+ u32 offset = 0, seg_hdr_len, seg_record_len;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* First segment should be hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf,
+ offset + seg_hdr_len);
+ if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ *dump_len += sizeof(struct bnxt_coredump_record);
+
+ return rc;
+}
+
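+/* ethtool -w hooks: get_dump_flag reports the dump length and
+ * firmware version, get_dump_data fills the user buffer.
+ */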
+static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
+ bp->ver_resp.hwrm_fw_min_8b << 16 |
+ bp->ver_resp.hwrm_fw_bld_8b << 8 |
+ bp->ver_resp.hwrm_fw_rsvd_8b;
+
+ return bnxt_get_coredump(bp, NULL, &dump->len);
+}
+
+static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
+ void *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ memset(buf, 0, dump->len);
+
+ return bnxt_get_coredump(bp, buf, &dump->len);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2702,6 +3053,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
strcpy(str, "Mac loopback test (offline)");
} else if (i == BNXT_PHYLPBK_TEST_IDX) {
strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_EXTLPBK_TEST_IDX) {
+ strcpy(str, "Ext loopback test (offline)");
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
@@ -2763,4 +3116,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .get_dump_flag = bnxt_get_dump_flag,
+ .get_dump_data = bnxt_get_dump_data,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 836ef682f24c..b5b65b3f8534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,6 +22,43 @@ struct bnxt_led_cfg {
u8 rsvd;
};
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
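+/* Parameters for the chunked DMA helper: where the sequence number
+ * and data length live in the request/response, the per-chunk DMA
+ * buffer size, and the destination buffer being filled.
+ */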
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+};
+
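+/* Common leading fields shared by the HWRM_DBG_COREDUMP_* request
+ * and response structures, so one helper can drive all of them.
+ */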
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 0fe0ea8dce6c..971ace5d0d4a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -96,6 +96,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
#define HWRM_FUNC_VF_CFG 0xfUL
#define HWRM_RESERVED1 0x10UL
@@ -159,6 +160,7 @@ struct cmd_nums {
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
@@ -191,6 +193,8 @@ struct cmd_nums {
#define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
#define HWRM_FW_SET_TIME 0xc8UL
#define HWRM_FW_GET_TIME 0xc9UL
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
@@ -269,6 +273,11 @@ struct cmd_nums {
#define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
#define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
#define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -284,6 +293,8 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_LIST 0xff17UL
#define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -318,6 +329,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -344,9 +356,9 @@ struct hwrm_err_output {
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.9.1.15"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 25
+#define HWRM_VERSION_STR "1.9.2.25"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -526,6 +538,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -564,6 +577,8 @@ struct hwrm_async_event_cmpl_link_status_change {
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
};
/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
@@ -817,23 +832,26 @@ struct hwrm_func_qcaps_output {
__le16 fid;
__le16 port_id;
__le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -947,58 +965,26 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
- u8 unused_2[7];
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg_input (size:384b/48B) */
-struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[2];
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_1;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_2;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- u8 unused_3[4];
-};
-
-/* hwrm_func_vlan_cfg_output (size:128b/16B) */
-struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
+ __le16 alloc_msix;
+ u8 unused_2[5];
u8 valid;
};
@@ -1010,7 +996,7 @@ struct hwrm_func_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le16 fid;
- u8 unused_0[2];
+ __le16 num_msix;
__le32 flags;
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
@@ -1050,6 +1036,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1109,13 +1097,19 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
};
@@ -1212,30 +1206,6 @@ struct hwrm_func_vf_resc_free_output {
u8 valid;
};
-/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
-struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[2];
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
-struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0[3];
- u8 valid;
-};
-
/* hwrm_func_drv_rgtr_input (size:896b/112B) */
struct hwrm_func_drv_rgtr_input {
__le16 req_type;
@@ -1286,7 +1256,9 @@ struct hwrm_func_drv_rgtr_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[7];
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
u8 valid;
};
@@ -1372,7 +1344,7 @@ struct hwrm_func_drv_qver_input {
u8 unused_0[2];
};
-/* hwrm_func_drv_qver_output (size:192b/24B) */
+/* hwrm_func_drv_qver_output (size:256b/32B) */
struct hwrm_func_drv_qver_output {
__le16 error_code;
__le16 req_type;
@@ -1394,12 +1366,13 @@ struct hwrm_func_drv_qver_output {
u8 ver_maj_8b;
u8 ver_min_8b;
u8 ver_upd_8b;
- u8 unused_0[2];
- u8 valid;
+ u8 unused_0[3];
__le16 ver_maj;
__le16 ver_min;
__le16 ver_upd;
__le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
};
/* hwrm_func_resource_qcaps_input (size:192b/24B) */
@@ -1493,6 +1466,410 @@ struct hwrm_func_vf_resource_cfg_output {
u8 valid;
};
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
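+ /* Each *_pg_size_*_lvl byte packs the page-table depth in bits
+ * 3:0 and the page size in bits 7:4.
+ */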
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
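+/* Driver notifies firmware of interface up/down transitions; the
+ * response flags whether function resources changed while down.
+ */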
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
@@ -1592,10 +1969,11 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1751,10 +2129,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
@@ -2014,6 +2393,131 @@ struct hwrm_port_mac_ptp_qcfg_output {
u8 valid;
};
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
__le16 req_type;
@@ -2039,6 +2543,83 @@ struct hwrm_port_qstats_output {
u8 valid;
};
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:2368b/296B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+};
+
/* hwrm_port_qstats_ext_input (size:320b/40B) */
struct hwrm_port_qstats_ext_input {
__le16 req_type;
@@ -2062,7 +2643,8 @@ struct hwrm_port_qstats_ext_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ __le16 total_active_cos_queues;
+ u8 unused_0;
u8 valid;
};
@@ -2153,9 +2735,10 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -2612,6 +3195,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id0;
u8 queue_id0_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2620,6 +3204,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id1;
u8 queue_id1_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2628,6 +3213,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id2;
u8 queue_id2_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2636,6 +3222,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id3;
u8 queue_id3_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2644,6 +3231,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id4;
u8 queue_id4_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2652,6 +3240,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id5;
u8 queue_id5_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2660,6 +3249,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id6;
u8 queue_id6_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2668,6 +3258,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id7;
u8 queue_id7_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -3689,18 +4280,21 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
__le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
__le16 cos_rule;
__le16 lb_rule;
__le16 mru;
- u8 unused_0[4];
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -3740,6 +4334,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
u8 unused_1[7];
u8 valid;
};
@@ -3857,7 +4452,14 @@ struct hwrm_vnic_rss_cfg_input {
#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- u8 unused_0[4];
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
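
The RSS request gains a vnic_id, a ring-table-pair index, and hash_mode_flags that control whether tunnelled traffic is hashed on inner or outer headers; the matching capability bit is VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP in the qcaps hunk above. A hedged sketch of the expected negotiation:

/* Hypothetical sketch: request outer-header RSS only when the qcaps
 * response advertised the capability; otherwise keep the default
 * (inner-header) behaviour. qcaps_flags is the le32 flags word from
 * hwrm_vnic_qcaps_output.
 */
static u8 bnxt_pick_rss_hash_mode(__le32 qcaps_flags)
{
	if (le32_to_cpu(qcaps_flags) & VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP)
		return VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4;
	return VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
}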
@@ -3950,7 +4552,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
u8 valid;
};
-/* hwrm_ring_alloc_input (size:640b/80B) */
+/* hwrm_ring_alloc_input (size:704b/88B) */
struct hwrm_ring_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3961,12 +4563,17 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
#define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le64 page_tbl_addr;
__le32 fbo;
@@ -3977,8 +4584,9 @@ struct hwrm_ring_alloc_input {
__le16 logical_id;
__le16 cmpl_ring_id;
__le16 queue_id;
- u8 unused_2[2];
- __le32 reserved1;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
__le16 ring_arb_cfg;
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
@@ -4016,6 +4624,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
u8 unused_4[3];
+ __le64 cq_handle;
};
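
hwrm_ring_alloc grows from 80 to 88 bytes: two new ring types (RX aggregation and notification queue), per-type ID fields replacing the old pad/reserved bytes, and a trailing 64-bit cq_handle. A hedged sketch of an NQ allocation, assuming bnxt's request plumbing and the length field from the full header; error handling elided:

/* Hypothetical sketch: allocate a notification queue (NQ). cq_handle
 * is an opaque 64-bit value that firmware echoes back in NQ events so
 * the driver can locate the matching completion ring.
 */
static int bnxt_hwrm_nq_alloc(struct bnxt *bp, dma_addr_t page_tbl,
			      u32 entries, u64 cq_handle, u16 *ring_id)
{
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_ring_alloc_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
	req.page_tbl_addr = cpu_to_le64(page_tbl);
	req.length = cpu_to_le32(entries);
	req.cq_handle = cpu_to_le64(cq_handle);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*ring_id = le16_to_cpu(resp->ring_id);
	return rc;
}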
/* hwrm_ring_alloc_output (size:128b/16B) */
@@ -4042,7 +4651,9 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_TX 0x1UL
#define RING_FREE_REQ_RING_TYPE_RX 0x2UL
#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -4058,6 +4669,52 @@ struct hwrm_ring_free_output {
u8 valid;
};
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
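hwrm_ring_aggint_qcaps is new: it reports which coalescing knobs the firmware supports and the legal [min, max] window for each, so a driver can sanitize ethtool -C input instead of hard-coding limits. A minimal hedged sketch of the clamping it enables:

/* Hypothetical sketch: clamp a requested maximum interrupt-latency
 * timer into the window the qcaps response reports. Values are in
 * units of resp->timer_units.
 */
static u16 bnxt_clamp_int_lat_tmr_max(struct hwrm_ring_aggint_qcaps_output *resp,
				      u16 requested)
{
	u16 lo = le16_to_cpu(resp->int_lat_tmr_max_min);
	u16 hi = le16_to_cpu(resp->int_lat_tmr_max_max);

	return clamp_t(u16, requested, lo, hi);
}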
/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 req_type;
@@ -4100,6 +4757,7 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 flags;
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
__le16 num_cmpl_dma_aggr;
__le16 num_cmpl_dma_aggr_during_int;
__le16 cmpl_aggr_dma_tmr;
@@ -4107,7 +4765,14 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 int_lat_tmr_min;
__le16 int_lat_tmr_max;
__le16 num_cmpl_aggr_int;
- u8 unused_0[6];
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
};
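
The cfg_aggint_params request turns six pad bytes into an enables word (plus an IS_NQ flag), so callers can update selected coalescing parameters and leave the rest untouched. A hedged sketch:

/* Hypothetical sketch: update only the two latency timers; firmware
 * ignores fields whose enables bit is clear.
 */
static void bnxt_fill_aggint_timers(struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req,
				    u16 tmr_min, u16 tmr_max)
{
	req->int_lat_tmr_min = cpu_to_le16(tmr_min);
	req->int_lat_tmr_max = cpu_to_le16(tmr_max);
	req->enables = cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN |
				   RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX);
}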
/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
@@ -4120,34 +4785,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
u8 valid;
};
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
__le16 req_type;
@@ -5032,7 +5669,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0[7];
};
@@ -5059,7 +5697,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5087,7 +5726,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5259,140 +5899,6 @@ struct hwrm_pcie_qstats_output {
u8 valid;
};
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* rx_port_stats_ext (size:320b/40B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
-};
-
/* pcie_ctx_hw_stats (size:768b/96B) */
struct pcie_ctx_hw_stats {
__le64 pcie_pl_signal_integrity;
@@ -5884,6 +6390,114 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 unused_0[7];
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 unused_0[2];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
@@ -6269,12 +6883,14 @@ struct hwrm_nvm_set_variable_input {
__le16 index_2;
__le16 index_3;
u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
u8 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a64910892c25..6d583bcd2a81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -447,7 +447,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
struct bnxt_pf_info *pf = &bp->pf;
- int i, rc = 0;
+ int i, rc = 0, min = 1;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -464,14 +464,19 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
- req.min_cmpl_rings = cpu_to_le16(1);
- req.min_tx_rings = cpu_to_le16(1);
- req.min_rx_rings = cpu_to_le16(1);
- req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
- req.min_vnics = cpu_to_le16(1);
- req.min_stat_ctx = cpu_to_le16(1);
- req.min_hw_ring_grps = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ min = 0;
+ req.min_rsscos_ctx = cpu_to_le16(min);
+ }
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
+ pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ req.min_cmpl_rings = cpu_to_le16(min);
+ req.min_tx_rings = cpu_to_le16(min);
+ req.min_rx_rings = cpu_to_le16(min);
+ req.min_l2_ctxs = cpu_to_le16(min);
+ req.min_vnics = cpu_to_le16(min);
+ req.min_stat_ctx = cpu_to_le16(min);
+ req.min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -618,7 +623,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
@@ -956,9 +961,13 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
mac_ok = true;
- } else if (bp->hwrm_spec_code < 0x10202) {
- mac_ok = true;
} else {
+		/* There are two cases:
+		 * 1. If firmware spec < 0x10202, the VF MAC address is not
+		 *    forwarded to the PF, so it does not have to match.
+		 * 2. Allow the VF to modify its own MAC when the PF has not
+		 *    assigned a valid MAC address and firmware spec >= 0x10202.
+		 */
REPLACED_BELOW
mac_ok = true;
}
if (mac_ok)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 491bd40a254d..139d96c5a023 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1568,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
- int rc = 0;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
- break;
-
+ return bnxt_tc_add_flow(bp, src_fid, cls_flower);
case TC_CLSFLOWER_DESTROY:
- rc = bnxt_tc_del_flow(bp, cls_flower);
- break;
-
+ return bnxt_tc_del_flow(bp, cls_flower);
case TC_CLSFLOWER_STATS:
- rc = bnxt_tc_get_flow_stats(bp, cls_flower);
- break;
+ return bnxt_tc_get_flow_stats(bp, cls_flower);
+ default:
+ return -EOPNOTSUPP;
}
- return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 840f6e505f73..c37b2842f972 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -141,7 +141,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (avail_msix > num_msix)
avail_msix = num_msix;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
idx = bp->cp_nr_rings;
} else {
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
@@ -162,7 +162,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return -EAGAIN;
}
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 05d405905906..e31f5d803c13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
bnxt_vf_rep_setup_tc_block_cb,
- vf_rep, vf_rep);
+ vf_rep, vf_rep, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
bnxt_vf_rep_setup_tc_block_cb, vf_rep);
@@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (bp->hwrm_spec_code < 0x10803) {
+ netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev,
- "Enable VFs before setting switchdev mode");
+ netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
rc = -EPERM;
goto done;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 1f0e872d0667..0584d07c8c33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
rc = bnxt_xdp_set(bp, xdp->prog);
break;
case XDP_QUERY_PROG:
- xdp->prog_attached = !!bp->xdp_prog;
xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
rc = 0;
break;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4fd829b5e65d..d83233ae4a15 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
- struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
@@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
- req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
@@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- kfree(cp->csk_tbl);
+ kvfree(cp->csk_tbl);
cp->csk_tbl = NULL;
cnic_free_id_tbl(&cp->csk_port_tbl);
}
@@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
- cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
- GFP_KERNEL);
+ cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
+ GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
@@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
- int func, ret;
+ int ret;
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->func = bp->pf_num;
- func = CNIC_FUNC(cp);
pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index aa1374d0af93..d8dad07f826a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -725,6 +725,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -785,6 +786,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -10719,28 +10721,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
switch (limit) {
case 16:
tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ /* fall through */
case 15:
tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ /* fall through */
case 14:
tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ /* fall through */
case 13:
tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ /* fall through */
case 12:
tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ /* fall through */
case 11:
tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ /* fall through */
case 10:
tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ /* fall through */
case 9:
tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ /* fall through */
case 8:
tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ /* fall through */
case 7:
tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ /* fall through */
case 6:
tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ /* fall through */
case 5:
tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ /* fall through */
case 4:
/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 427d65a1a126..b9984015ca8c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,7 +2,7 @@
# Atmel device configuration
#
-config NET_CADENCE
+config NET_VENDOR_CADENCE
bool "Cadence devices"
depends on HAS_IOMEM
default y
@@ -16,7 +16,7 @@ config NET_CADENCE
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_CADENCE
+if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
@@ -48,4 +48,4 @@ config MACB_PCI
To compile this driver as a module, choose M here: the module
will be called macb_pci.
-endif # NET_CADENCE
+endif # NET_VENDOR_CADENCE
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a6c911bb5ce2..dc09f9a8a49b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -1565,6 +1566,9 @@ static unsigned int macb_tx_map(struct macb *bp,
if (i == queue->tx_head) {
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+ if ((bp->dev->features & NETIF_F_HW_CSUM) &&
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
* (second or later descriptor)
@@ -1651,7 +1655,68 @@ static inline int macb_clear_csum(struct sk_buff *skb)
return 0;
}
-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+{
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ int padlen = ETH_ZLEN - (*skb)->len;
+ int headroom = skb_headroom(*skb);
+ int tailroom = skb_tailroom(*skb);
+ struct sk_buff *nskb;
+ u32 fcs;
+
+ if (!(ndev->features & NETIF_F_HW_CSUM) ||
+ !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
+ skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ return 0;
+
+ if (padlen <= 0) {
+		/* FCS could be appended to tailroom. */
+ if (tailroom >= ETH_FCS_LEN)
+ goto add_fcs;
+		/* FCS could be appended by moving data to headroom. */
+ else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+ padlen = 0;
+ /* No room for FCS, need to reallocate skb. */
+ else
+ padlen = ETH_FCS_LEN - tailroom;
+ } else {
+ /* Add room for FCS. */
+ padlen += ETH_FCS_LEN;
+ }
+
+ if (!cloned && headroom + tailroom >= padlen) {
+ (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+ skb_set_tail_pointer(*skb, (*skb)->len);
+ } else {
+ nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ dev_kfree_skb_any(*skb);
+ *skb = nskb;
+ }
+
+ if (padlen) {
+ if (padlen >= ETH_FCS_LEN)
+ skb_put_zero(*skb, padlen - ETH_FCS_LEN);
+ else
+ skb_trim(*skb, ETH_FCS_LEN - padlen);
+ }
+
+add_fcs:
+	/* append the computed FCS to the packet */
+ fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
+ fcs = ~fcs;
+
+ skb_put_u8(*skb, fcs & 0xff);
+ skb_put_u8(*skb, (fcs >> 8) & 0xff);
+ skb_put_u8(*skb, (fcs >> 16) & 0xff);
+ skb_put_u8(*skb, (fcs >> 24) & 0xff);
+
+ return 0;
+}
+
+static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
@@ -1660,6 +1725,17 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
bool is_lso, is_udp = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (macb_pad_and_fcs(&skb, dev)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
is_lso = (skb_shinfo(skb)->gso_size != 0);
@@ -1716,11 +1792,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (macb_clear_csum(skb)) {
- dev_kfree_skb_any(skb);
- goto unlock;
- }
-
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
@@ -1739,7 +1810,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unlock:
spin_unlock_irqrestore(&bp->lock, flags);
- return NETDEV_TX_OK;
+ return ret;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
@@ -3549,7 +3620,8 @@ static int at91ether_close(struct net_device *dev)
}
/* Transmit packet */
-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
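
The FCS math in macb_pad_and_fcs() is the standard IEEE 802.3 frame check sequence: fcs = ~crc32_le(~0, data, len), appended least-significant byte first. A standalone demo using a bitwise CRC over the reflected polynomial 0xEDB88320, which matches the kernel's crc32_le(); the frame contents here are placeholders:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320U & -(crc & 1U));
	}
	return crc;
}

int main(void)
{
	uint8_t frame[64] = { 0 };	/* dst/src/type + zero-padded payload */
	uint32_t fcs = ~crc32_le(~0U, frame, 60);

	frame[60] = fcs & 0xff;		/* LSB goes on the wire first */
	frame[61] = (fcs >> 8) & 0xff;
	frame[62] = (fcs >> 16) & 0xff;
	frame[63] = (fcs >> 24) & 0xff;
	printf("FCS = 0x%08x\n", fcs);
	return 0;
}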
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 678835136bf8..cd5296b84229 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -466,6 +466,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
+ /* fall through */
case HWTSTAMP_TX_ON:
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 92d88c5f76fb..5f03199a3acf 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -4,7 +4,6 @@
config NET_VENDOR_CAVIUM
bool "Cavium ethernet drivers"
- depends on PCI
default y
---help---
Select this option if you want to enable Cavium network support.
@@ -36,7 +35,7 @@ config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
@@ -46,7 +45,7 @@ config THUNDER_NIC_RGX
tristate "Thunder MAC interface driver (RGX)"
depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
---help---
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
@@ -67,6 +66,7 @@ config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT && PCI
depends on MAY_USE_DEVLINK
+ depends on PCI
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 929d485a3a2f..9f4f3c1d5043 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -1414,50 +1417,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
return 0;
}
-void cn23xx_dump_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
- CN23XX_SLI_CTL_STATUS,
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
-
- for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-
- dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
- oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- CVM_CAST64(lio_pci_readq(
- oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
- oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 9338a0008378..962bb62933db 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -379,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
mbox_cmd.fn = NULL;
- mbox_cmd.fn_arg = 0;
+ mbox_cmd.fn_arg = NULL;
octeon_mbox_write(oct, &mbox_cmd);
}
@@ -679,33 +682,3 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
return 0;
}
-
-void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));
-
- for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 06f7449c569d..8e05afd5e39c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -389,18 +389,14 @@ static int lio_set_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct oct_link_info *linfo;
struct octeon_device *oct;
- u32 is25G = 0;
oct = lio->oct_dev;
linfo = &lio->linfo;
- if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
- oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
- is25G = 1;
- } else {
+ if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
return -EOPNOTSUPP;
- }
if (oct->no_speed_setting) {
dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
@@ -857,7 +853,14 @@ static int lio_set_phys_id(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
int value, ret;
+ u32 cur_ver;
+
+ linfo = &lio->linfo;
+ cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
+ oct->fw_info.ver.min,
+ oct->fw_info.ver.rev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -896,16 +899,22 @@ static int lio_set_phys_id(struct net_device *netdev,
return ret;
} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
octnet_id_active(netdev, LED_IDENTIFICATION_ON);
-
- /* returns 0 since updates are asynchronous */
- return 0;
+ if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ return 2;
+ else
+ return 0;
} else {
return -EINVAL;
}
break;
case ETHTOOL_ID_ON:
- if (oct->chip_id == OCTEON_CN66XX)
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+ else if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_HIGH);
else
@@ -914,7 +923,11 @@ static int lio_set_phys_id(struct net_device *netdev,
break;
case ETHTOOL_ID_OFF:
- if (oct->chip_id == OCTEON_CN66XX)
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ else if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_LOW);
else
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7e8454d3b1ad..6fb13fa73b27 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -687,7 +687,7 @@ static void lio_sync_octeon_time(struct work_struct *work)
lt = (struct lio_time *)sc->virtdptr;
/* Get time of the day */
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
lt->sec = ts.tv_sec;
lt->nsec = ts.tv_nsec;
octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
@@ -2209,6 +2209,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (lio->oct_dev->ptp_enable)
return hwtstamp_ioctl(netdev, ifr);
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -2631,7 +2632,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
@@ -2909,7 +2910,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3068,7 +3069,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = linkstate;
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3302,7 +3303,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
{
struct lio *lio = NULL;
struct net_device *netdev;
- u8 mac[6], i, j, *fw_ver;
+ u8 mac[6], i, j, *fw_ver, *micro_ver;
+ unsigned long micro;
+ u32 cur_ver;
struct octeon_soft_command *sc;
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
@@ -3432,6 +3435,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
fw_ver);
}
+ /* extract micro version field; point past '<maj>.<min>.' */
+ micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
+ if (kstrtoul(micro_ver, 10, &micro) != 0)
+ micro = 0;
+ octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
+ octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
+ octeon_dev->fw_info.ver.rev = micro;
+
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
(sizeof(struct liquidio_if_cfg_info)) >> 3);
@@ -3572,9 +3583,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
u8 vfmac[ETH_ALEN];
- random_ether_addr(&vfmac[0]);
- if (__liquidio_set_vf_mac(netdev, j,
- &vfmac[0], false)) {
+ eth_random_addr(vfmac);
+ if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
dev_err(&octeon_dev->pci_dev->dev,
"Error setting VF%d MAC address\n",
j);
@@ -3675,7 +3685,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OCTEON_CN2350_25GB_SUBSYS_ID ||
octeon_dev->subsystem_id ==
OCTEON_CN2360_25GB_SUBSYS_ID) {
- liquidio_get_speed(lio);
+ cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
+ octeon_dev->fw_info.ver.min,
+ octeon_dev->fw_info.ver.rev);
+
+ /* speed control unsupported in f/w older than 1.7.2 */
+ if (cur_ver < OCT_FW_VER(1, 7, 2)) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "speed setting not supported by f/w.");
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ } else {
+ liquidio_get_speed(lio);
+ }
if (octeon_dev->speed_setting == 0) {
octeon_dev->speed_setting = 25;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7fa0212873ac..b77835724dc8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 690424b6781a..7407fcd338e9 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -907,6 +907,7 @@ static inline int opcode_slow_path(union octeon_rh *rh)
#define VITESSE_PHY_GPIO_LOW 0x3
#define LED_IDENTIFICATION_ON 0x1
#define LED_IDENTIFICATION_OFF 0x0
+#define LIO23XX_COPPERHEAD_LED_GPIO 0x2
struct oct_mdio_cmd {
u64 op;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 7f97ae48efed..0cc2338d8d2a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
*
* Octeon always uses UTC time, so timezone information is not sent.
*/
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
" time_sec=%lld time_nsec=%ld",
(s64)ts.tv_sec, ts.tv_nsec);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 94a4ed88d618..d99ca6ba23a4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -288,8 +288,17 @@ struct oct_fw_info {
*/
u32 app_mode;
char liquidio_firmware_version[32];
+ /* Fields extracted from legacy string 'liquidio_firmware_version' */
+ struct {
+ u8 maj;
+ u8 min;
+ u8 rev;
+ } ver;
};
+#define OCT_FW_VER(maj, min, rev) \
+ (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
+
/* wrappers around work structs */
struct cavium_wk {
struct delayed_work work;
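
OCT_FW_VER packs each version component into its own byte, so packed values compare in version order with a plain integer comparison (valid while every component stays below 256); this is how the driver gates LED-ID and speed-setting support on firmware newer than 1.7.2. A standalone demo:

#include <stdint.h>
#include <stdio.h>

#define OCT_FW_VER(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | ((uint32_t)(rev)))

int main(void)
{
	uint32_t cur = OCT_FW_VER(1, 7, 3);

	if (cur > OCT_FW_VER(1, 7, 2))
		printf("firmware is newer than 1.7.2\n");
	return 0;
}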
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5fed7b63223e..2327062e8af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -82,6 +82,16 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
+ /** This flag indicates if the queue can be used for soft commands.
+ * If this flag is set, post_lock must be acquired before posting
+ * a command to the queue.
+ * If this flag is clear, post_lock is invalid for the queue.
+	 * All control commands (soft commands) go through queue 0 only
+	 * (the control-and-data queue), so only queue 0 needs post_lock;
+	 * the other queues carry data only and do not need post_lock.
+ */
+ bool allow_soft_cmds;
+
u32 pkt_in_done;
/** A spinlock to protect access to the input ring.*/
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 1f2e75da28f8..8f746e1348d4 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
- dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
- iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+ iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
@@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
- spin_lock_init(&iq->post_lock);
+ if (iq_no == 0) {
+ iq->allow_soft_cmds = true;
+ spin_lock_init(&iq->post_lock);
+ } else {
+ iq->allow_soft_cmds = false;
+ }
spin_lock_init(&iq->iq_flush_running_lock);
@@ -566,7 +571,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
/* Get the lock and prevent other tasks and tx interrupt handler from
* running.
*/
- spin_lock_bh(&iq->post_lock);
+ if (iq->allow_soft_cmds)
+ spin_lock_bh(&iq->post_lock);
st = __post_command2(iq, cmd);
@@ -583,7 +589,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
- spin_unlock_bh(&iq->post_lock);
+ if (iq->allow_soft_cmds)
+ spin_unlock_bh(&iq->post_lock);
/* This is only done here to expedite packets being flushed
* for cases where there are no IQ completion interrupts.
@@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
+ struct octeon_instr_queue *iq;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
+ iq = oct->instr_queue[sc->iq_no];
+ if (!iq->allow_soft_cmds) {
+ dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+ sc->iq_no);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+ return IQ_SEND_FAILED;
+ }
+
if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
if (ih3->dlengsz) {
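
The request_manager.c hunks above spread one pattern across several places; condensed, the conditional locking looks like the hedged sketch below. Only queue 0 mixes soft (control) commands with data, so only it pays for post_lock; pure data queues are already serialized by the transmit path.

/* Hypothetical condensed form of the pattern introduced above. */
static int post_to_iq(struct octeon_instr_queue *iq, void *cmd)
{
	int st;

	if (iq->allow_soft_cmds)
		spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (iq->allow_soft_cmds)
		spin_unlock_bh(&iq->post_lock);

	return st;
}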
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5603f5ab1fee..92ba958e204e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -527,6 +527,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case IPV4_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 135766c4296b..768f584f8392 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return nicvf_xdp_setup(nic, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nic->xdp_prog;
xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
return 0;
default:
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 8623be13bf86..0ccdde366ae1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -109,10 +109,6 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-static const char pci_speed[][4] = {
- "33", "66", "100", "133"
-};
-
/*
* Setup MAC to receive the types of packets we want.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 248e40c6966c..0e9182d3f02c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -136,6 +136,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
+ /* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 3c5057868ab3..36d25883d123 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -120,6 +120,8 @@ struct cudbg_mem_desc {
u32 idx;
};
+#define CUDBG_MEMINFO_REV 1
+
struct cudbg_meminfo {
struct cudbg_mem_desc avail[4];
struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
@@ -137,6 +139,9 @@ struct cudbg_meminfo {
u32 port_alloc[4];
u32 loopback_used[NCHAN];
u32 loopback_alloc[NCHAN];
+ u32 p_structs_free_cnt;
+ u32 free_rx_cnt;
+ u32 free_tx_cnt;
};
struct cudbg_cim_pif_la {
@@ -281,12 +286,18 @@ struct cudbg_tid_data {
#define CUDBG_NUM_ULPTX 11
#define CUDBG_NUM_ULPTX_READ 512
+#define CUDBG_NUM_ULPTX_ASIC 6
+#define CUDBG_NUM_ULPTX_ASIC_READ 128
+
+#define CUDBG_ULPTX_LA_REV 1
struct cudbg_ulptx_la {
u32 rdptr[CUDBG_NUM_ULPTX];
u32 wrptr[CUDBG_NUM_ULPTX];
u32 rddata[CUDBG_NUM_ULPTX];
u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+ u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ];
+ u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC];
};
#define CUDBG_CHAC_PBT_ADDR 0x2800
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 0afcfe99bff3..d97e0d7e541a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -349,6 +349,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->up_extmem2_hi = hi;
lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
+ meminfo_buff->free_rx_cnt +=
+ FREERXPAGECOUNT_G(t4_read_reg(padap,
+ TP_FLM_FREE_RX_CNT_A));
+
meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
meminfo_buff->rx_pages_data[1] =
t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
@@ -356,6 +361,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
+ meminfo_buff->free_tx_cnt +=
+ FREETXPAGECOUNT_G(t4_read_reg(padap,
+ TP_FLM_FREE_TX_CNT_A));
+
meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
meminfo_buff->tx_pages_data[1] =
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
@@ -364,6 +374,8 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+ meminfo_buff->p_structs_free_cnt =
+ FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));
for (i = 0; i < 4; i++) {
if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
@@ -1465,14 +1477,23 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_meminfo *meminfo_buff;
+ struct cudbg_ver_hdr *ver_hdr;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_meminfo),
&temp_buff);
if (rc)
return rc;
- meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_MEMINFO_REV;
+ ver_hdr->size = sizeof(struct cudbg_meminfo);
+
+ meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
+ sizeof(*ver_hdr));
rc = cudbg_fill_meminfo(padap, meminfo_buff);
if (rc) {
cudbg_err->sys_err = rc;
@@ -2586,15 +2607,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ulptx_la *ulptx_la_buff;
+ struct cudbg_ver_hdr *ver_hdr;
u32 i, j;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
- ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_ULPTX_LA_REV;
+ ver_hdr->size = sizeof(struct cudbg_ulptx_la);
+
+ ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
+ sizeof(*ver_hdr));
for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
ULP_TX_LA_RDPTR_0_A +
@@ -2610,6 +2640,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
+
+ for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
+ t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
+ ulptx_la_buff->rdptr_asic[i] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
+ ulptx_la_buff->rddata_asic[i][0] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
+ ulptx_la_buff->rddata_asic[i][1] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
+ ulptx_la_buff->rddata_asic[i][2] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
+ ulptx_la_buff->rddata_asic[i][3] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
+ ulptx_la_buff->rddata_asic[i][4] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
+ ulptx_la_buff->rddata_asic[i][5] =
+ t4_read_reg(padap, PM_RX_BASE_ADDR);
+ }
+
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
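
CUDBG_MEMINFO and CUDBG_ULPTX_LA payloads are now prefixed with a cudbg_ver_hdr, so dump consumers must validate the header before casting the body. A hedged sketch of the read side, assuming the layouts from cudbg_entity.h and the shared signature constant:

/* Hypothetical consumer-side check for a versioned meminfo entity. */
static struct cudbg_meminfo *cudbg_get_meminfo(void *buf, u32 len)
{
	struct cudbg_ver_hdr *hdr = buf;

	if (len < sizeof(*hdr) + sizeof(struct cudbg_meminfo))
		return NULL;
	if (hdr->signature != CUDBG_ENTITY_SIGNATURE ||
	    hdr->revision < CUDBG_MEMINFO_REV)
		return NULL;	/* old or foreign dump format */

	return (struct cudbg_meminfo *)(hdr + 1);
}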
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0dbe2d9e22d6..76d16747f513 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,6 +46,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -319,6 +320,21 @@ struct vpd_params {
u8 na[MACADDR_LEN + 1];
};
+/* Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned char speed;
@@ -346,6 +362,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
+ struct pf_resources pfres;
struct pci_params pci;
struct devlog_params devlog;
enum pcie_memwin drv_memwin;
@@ -521,6 +538,15 @@ enum {
MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
+enum {
+ PRIV_FLAG_PORT_TX_VM_BIT,
+};
+
+#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT)
+
+#define PRIV_FLAGS_ADAP 0
+#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM
+
struct adapter;
struct sge_rspq;
@@ -557,6 +583,7 @@ struct port_info {
struct hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
+ u32 eth_flags;
};
struct dentry;
@@ -867,6 +894,7 @@ struct adapter {
unsigned int flags;
unsigned int adap_idx;
enum chip_type chip;
+ u32 eth_flags;
int msg_enable;
__be16 vxlan_port;
@@ -956,6 +984,7 @@ struct adapter {
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
+ bool tc_flower_initialized;
struct rhashtable flower_tbl;
struct rhashtable_params flower_ht_params;
struct timer_list flower_stats_timer;
@@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1823,4 +1853,5 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int cxgb4_dcb_enabled(const struct net_device *dev);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 8d751efcb90e..5f01c0a7fd98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -224,7 +224,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
case CUDBG_MEMINFO:
- len = sizeof(struct cudbg_meminfo);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_meminfo);
break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
@@ -273,7 +274,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
break;
case CUDBG_ULPTX_LA:
- len = sizeof(struct cudbg_ulptx_la);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
n = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4e7f72b17e82..b34f0f077a31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -22,7 +22,7 @@
/* DCBx version control
*/
-static const char * const dcb_ver_array[] = {
+const char * const dcb_ver_array[] = {
"Unknown",
"DCBx-CIN",
"DCBx-CEE 1.01",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c301aaf79d64..0f72f9c4ec74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2414,6 +2414,234 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
.release = seq_release_private
};
+#ifdef CONFIG_CHELSIO_T4_DCB
+extern const char * const dcb_ver_array[];
+
+/* Data Center Bridging information for each port.
+ */
+static int dcb_info_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Data Center Bridging Information\n");
+ } else {
+ int port = (uintptr_t)v - 2;
+ struct net_device *dev = adap->port[port];
+ struct port_info *pi = netdev2pinfo(dev);
+ struct port_dcb_info *dcb = &pi->dcb;
+
+ seq_puts(seq, "\n");
+ seq_printf(seq, "Port: %d (DCB negotiated: %s)\n",
+ port,
+ cxgb4_dcb_enabled(dev) ? "yes" : "no");
+
+ if (cxgb4_dcb_enabled(dev))
+ seq_printf(seq, "[ DCBx Version %s ]\n",
+ dcb_ver_array[dcb->dcb_version]);
+
+ if (dcb->msgs) {
+ int i;
+
+ seq_puts(seq, "\n Index\t\t\t :\t");
+ for (i = 0; i < 8; i++)
+ seq_printf(seq, " %3d", i);
+ seq_puts(seq, "\n\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PGID) {
+ int prio, pgid;
+
+ seq_puts(seq, " Priority Group IDs\t :\t");
+ for (prio = 0; prio < 8; prio++) {
+ pgid = (dcb->pgid >> 4 * (7 - prio)) & 0xf;
+ seq_printf(seq, " %3d", pgid);
+ }
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PGRATE) {
+ int pg;
+
+ seq_puts(seq, " Priority Group BW(%)\t :\t");
+ for (pg = 0; pg < 8; pg++)
+ seq_printf(seq, " %3d", dcb->pgrate[pg]);
+ seq_puts(seq, "\n");
+
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ seq_puts(seq, " TSA Algorithm\t\t :\t");
+ for (pg = 0; pg < 8; pg++)
+ seq_printf(seq, " %3d", dcb->tsa[pg]);
+ seq_puts(seq, "\n");
+ }
+
+ seq_printf(seq, " Max PG Traffic Classes [%3d ]\n",
+ dcb->pg_num_tcs_supported);
+
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PRIORATE) {
+ int prio;
+
+ seq_puts(seq, " Priority Rate\t:\t");
+ for (prio = 0; prio < 8; prio++)
+ seq_printf(seq, " %3d", dcb->priorate[prio]);
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PFC) {
+ int prio;
+
+ seq_puts(seq, " Priority Flow Control :\t");
+ for (prio = 0; prio < 8; prio++) {
+ int pfcen = (dcb->pfcen >> 1 * (7 - prio))
+ & 0x1;
+ seq_printf(seq, " %3d", pfcen);
+ }
+ seq_puts(seq, "\n");
+
+ seq_printf(seq, " Max PFC Traffic Classes [%3d ]\n",
+ dcb->pfc_num_tcs_supported);
+
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_APP_ID) {
+ int app, napps;
+
+ seq_puts(seq, " Application Information:\n");
+ seq_puts(seq, " App Priority Selection Protocol\n");
+ seq_puts(seq, " Index Map Field ID\n");
+ for (app = 0, napps = 0;
+ app < CXGB4_MAX_DCBX_APP_SUPPORTED; app++) {
+ struct app_priority *ap;
+ static const char * const sel_names[] = {
+ "Ethertype",
+ "Socket TCP",
+ "Socket UDP",
+ "Socket All",
+ };
+ const char *sel_name;
+
+ ap = &dcb->app_priority[app];
+ /* skip empty slots */
+ if (ap->protocolid == 0)
+ continue;
+ napps++;
+
+ if (ap->sel_field < ARRAY_SIZE(sel_names))
+ sel_name = sel_names[ap->sel_field];
+ else
+ sel_name = "UNKNOWN";
+
+ seq_printf(seq, " %3d %#04x %-10s (%d) %#06x (%d)\n",
+ app,
+ ap->user_prio_map,
+ sel_name, ap->sel_field,
+ ap->protocolid, ap->protocolid);
+ }
+ if (napps == 0)
+ seq_puts(seq, " --- None ---\n");
+ }
+ }
+ return 0;
+}
+
+static inline void *dcb_info_get_idx(struct adapter *adap, loff_t pos)
+{
+ return (pos <= adap->params.nports
+ ? (void *)((uintptr_t)pos + 1)
+ : NULL);
+}
+
+static void *dcb_info_start(struct seq_file *seq, loff_t *pos)
+{
+ struct adapter *adap = seq->private;
+
+ return (*pos
+ ? dcb_info_get_idx(adap, *pos)
+ : SEQ_START_TOKEN);
+}
+
+static void dcb_info_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *dcb_info_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct adapter *adap = seq->private;
+
+ (*pos)++;
+ return dcb_info_get_idx(adap, *pos);
+}
+
+static const struct seq_operations dcb_info_seq_ops = {
+ .start = dcb_info_start,
+ .next = dcb_info_next,
+ .stop = dcb_info_stop,
+ .show = dcb_info_show
+};
+
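The iterator's position encoding deserves a note: dcb_info_get_idx() biases the 0-based port index by one before stuffing it into a void *, because seq_file treats a NULL cookie from start()/next() as end-of-sequence, and dcb_info_show() undoes both the bias and the SEQ_START_TOKEN slot. An illustrative summary:

/* Position-to-cookie mapping (illustrative):
 *
 *   *pos == 0          -> SEQ_START_TOKEN       (banner row)
 *   *pos == 1..nports  -> (void *)(*pos + 1)    (never NULL)
 *   show(v)            -> port = (uintptr_t)v - 2 = *pos - 1
 *
 * The +1 bias keeps every valid cookie non-NULL; the -2 in show()
 * removes that bias plus the banner slot to recover the port index.
 */
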
+static int dcb_info_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &dcb_info_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations dcb_info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = dcb_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+static int resources_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ #define S(desc, fmt, var) \
+ seq_printf(seq, "%-60s " fmt "\n", \
+ desc " (" #var "):", pfres->var)
+
+ S("Virtual Interfaces", "%d", nvi);
+ S("Egress Queues", "%d", neq);
+ S("Ethernet Control", "%d", nethctrl);
+ S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+ S("Ingress Queues", "%d", niq);
+ S("Traffic Class", "%d", tc);
+ S("Port Access Rights Mask", "%#x", pmask);
+ S("MAC Address Filters", "%d", nexactf);
+ S("Firmware Command Read Capabilities", "%#x", r_caps);
+ S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+ #undef S
+
+ return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = resources_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
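With the S() macro padding each description to 60 columns, reading the new "resources" node yields one aligned line per pf_resources field, along these lines (values hypothetical):

    Virtual Interfaces (nvi):                                    2
    Egress Queues (neq):                                         130
    Port Access Rights Mask (pmask):                             0xf
    Firmware Command Read Capabilities (r_caps):                 0x3fff
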
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
* @adap: the adapter
@@ -2436,16 +2664,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
return NULL;
}
+static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
+{
+ const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];
+
+ if (!utxq_info)
+ return 0;
+
+ return DIV_ROUND_UP(utxq_info->ntxq, 4);
+}
+
+static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
+ bool ciq)
+{
+ const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];
+
+ if (!urxq_info)
+ return 0;
+
+ return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
+ DIV_ROUND_UP(urxq_info->nrxq, 4);
+}
+
+static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, false);
+}
+
+static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, true);
+}
+
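These helpers return display rows rather than queue counts: sge_qinfo prints queues four per row, so each section contributes ceil(n/4) rows, and the width of the final, possibly partial row is recovered later. Illustratively:

/* Rows-of-four arithmetic (illustrative):
 *   DIV_ROUND_UP(n, 4) == (n + 3) / 4, so ntxq = 10 -> 3 rows,
 *   displayed as groups of 4, 4 and 2 queues; row r then prints
 *   n = min(4, ntxq - 4 * r) columns.
 */
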
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
+ const struct sge_uld_txq_info *utxq_info;
+ const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
- int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
- int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- int i, r = (uintptr_t)v - 1;
- int ofld_idx = r - eth_entries;
- int ctrl_idx = ofld_idx - ofld_entries;
- int fq_idx = ctrl_idx - ctrl_entries;
+ int i, n, r = (uintptr_t)v - 1;
+ int eth_entries, ctrl_entries;
+ struct sge *s = &adap->sge;
+
+ eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+ ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ }
if (r)
seq_putc(seq, '\n');
@@ -2467,9 +2743,10 @@ do { \
if (r < eth_entries) {
int base_qset = r * 4;
- const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
- const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
- int n = min(4, adap->sge.ethqsets - 4 * r);
+ const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
+ const struct sge_eth_txq *tx = &s->ethtxq[base_qset];
+
+ n = min(4, s->ethqsets - 4 * r);
S("QType:", "Ethernet");
S("Interface:",
@@ -2494,8 +2771,7 @@ do { \
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
@@ -2520,9 +2796,196 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (ctrl_idx < ctrl_entries) {
- const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
- int n = min(4, adap->params.nports - 4 * ctrl_idx);
+ goto unlock;
+ }
+
+ r -= eth_entries;
+ if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
+ tx = &utxq_info->uldtxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "OFLD-TXQ");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_OFLD];
+ if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "RDMA-CPL");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+ int ciq_idx = 0;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ ciq_idx = urxq_info->nrxq + (r * 4);
+ rx = &urxq_info->uldrxq[ciq_idx];
+ n = min(4, urxq_info->nciq - 4 * r);
+
+ S("QType:", "RDMA-CIQ");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+
+ goto unlock;
+ }
+
+ r -= uld_ciq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSI");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSIT");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
+ if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "TLS");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_TLS];
+ if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
+ const struct sge_ofld_rxq *rx;
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
+ tx = &utxq_info->uldtxq[r * 4];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "Crypto");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ if (r < ctrl_entries) {
+ const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
+
+ n = min(4, adap->params.nports - 4 * r);
S("QType:", "Control");
T("TxQ ID:", q.cntxt_id);
@@ -2532,8 +2995,13 @@ do { \
T("TxQ PIDX:", q.pidx);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- } else if (fq_idx == 0) {
- const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+ goto unlock;
+ }
+
+ r -= ctrl_entries;
+ if (r < 1) {
+ const struct sge_rspq *evtq = &s->fw_evtq;
seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
@@ -2544,8 +3012,13 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adap, evtq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
- adap->sge.counter_val[evtq->pktcnt_idx]);
+ s->counter_val[evtq->pktcnt_idx]);
+
+ goto unlock;
}
+
+unlock:
+ mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -2559,8 +3032,21 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
+ int tot_uld_entries = 0;
+ int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_TX_MAX; i++)
+ tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
+
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
+ tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ mutex_unlock(&uld_mutex);
+
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
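
sge_queue_entries() and sge_qinfo_show() must agree on a single flat row space: Ethernet rows first, then each ULD TX/RXQ/CIQ section in a fixed order, then the control queues, then one row for the firmware event queue. The show path decodes a row index by subtracting section sizes in that same order; a generic sketch of the pattern (function and array are hypothetical):

/* Generic form of the decode in sge_qinfo_show(); the sections array
 * must be ordered exactly as sge_queue_entries() sums the totals,
 * otherwise rows get attributed to the wrong queue type.
 */
static int decode_row(int r, const int *sections, int nsections)
{
	int i;

	for (i = 0; i < nsections; i++) {
		if (r < sections[i])
			return i;	/* row r belongs to section i */
		r -= sections[i];
	}
	return -1;			/* past the final (FW event) row */
}
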
@@ -2851,15 +3337,17 @@ static int meminfo_show(struct seq_file *seq, void *v)
mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
meminfo.up_extmem2_hi);
- seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
- meminfo.rx_pages_data[2]);
+ seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
+ meminfo.rx_pages_data[0], meminfo.free_rx_cnt,
+ meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]);
- seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
- meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+ seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
+ meminfo.tx_pages_data[0], meminfo.free_tx_cnt,
+ meminfo.tx_pages_data[1], meminfo.tx_pages_data[2],
+ meminfo.tx_pages_data[3]);
- seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+ seq_printf(seq, "%u p-structs (%u free)\n\n",
+ meminfo.p_structs, meminfo.p_structs_free_cnt);
for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
@@ -2924,6 +3412,169 @@ static const struct file_operations chcr_stats_debugfs_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+#define PRINT_ADAP_STATS(string, value) \
+ seq_printf(seq, "%-25s %-20llu\n", (string), \
+ (unsigned long long)(value))
+
+#define PRINT_CH_STATS(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats.value[i]); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+#define PRINT_CH_STATS2(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats[i].value); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+static void show_tcp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_tcp_stats v4, v6;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
+}
+
+static void show_ddp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_usm_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_get_usm_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
+ PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
+ PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
+}
+
+static void show_rdma_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_rdma_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_rdma_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
+ PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
+}
+
+static void show_tp_err_adapter_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
+ PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
+}
+
+static void show_cpl_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_cpl_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_cpl_requests:", req);
+ PRINT_CH_STATS("tp_cpl_responses:", rsp);
+}
+
+static void show_tp_err_channel_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
+ PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
+ PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
+ PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
+ PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
+ PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
+ PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
+ PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
+}
+
+static void show_fcoe_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_fcoe_stats stats[NCHAN];
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ for (i = 0; i < adap->params.arch.nchan; i++)
+ t4_get_fcoe_stats(adap, i, &stats[i], false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
+ PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
+ PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
+}
+
+#undef PRINT_CH_STATS2
+#undef PRINT_CH_STATS
+#undef PRINT_ADAP_STATS
+
+static int tp_stats_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ seq_puts(seq, "\n--------Adapter Stats--------\n");
+ show_tcp_stats(seq);
+ show_ddp_stats(seq);
+ show_rdma_stats(seq);
+ show_tp_err_adapter_stats(seq);
+
+ seq_puts(seq, "\n-------- Channel Stats --------\n");
+ if (adap->params.arch.nchan == NCHAN)
+ seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
+ " ", "channel 0", "channel 1",
+ "channel 2", "channel 3");
+ else
+ seq_printf(seq, "%-25s %-20s %-20s\n",
+ " ", "channel 0", "channel 1");
+ show_cpl_stats(seq);
+ show_tp_err_channel_stats(seq);
+ show_fcoe_stats(seq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+
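Two channel macros exist because the TP statistics come back in two shapes: struct-of-arrays, where each counter is itself indexed by channel (as in tp_cpl_stats), versus an array of per-channel structs (as with tp_fcoe_stats above). Schematically (shapes illustrative, not the real structs):

/* PRINT_CH_STATS  expects:  struct { u64 req[NCHAN]; } stats;
 *                 and emits  stats.value[i]   for channel i.
 * PRINT_CH_STATS2 expects:  struct { u64 octets_ddp; } stats[NCHAN];
 *                 and emits  stats[i].value   for channel i.
 */
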
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2973,6 +3624,10 @@ int t4_setup_debugfs(struct adapter *adap)
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
+ { "resources", &resources_debugfs_fops, 0400, 0 },
+#ifdef CONFIG_CHELSIO_T4_DCB
+ { "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
+#endif
{ "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
{ "ibq_tp0", &cim_ibq_fops, 0400, 0 },
{ "ibq_tp1", &cim_ibq_fops, 0400, 1 },
@@ -2999,6 +3654,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
{ "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index f7eef93ffc87..d07230c892a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_drop ",
"db_full ",
"db_empty ",
- "tcp_ipv4_out_rsts ",
- "tcp_ipv4_in_segs ",
- "tcp_ipv4_out_segs ",
- "tcp_ipv4_retrans_segs ",
- "tcp_ipv6_out_rsts ",
- "tcp_ipv6_in_segs ",
- "tcp_ipv6_out_segs ",
- "tcp_ipv6_retrans_segs ",
- "usm_ddp_frames ",
- "usm_ddp_octets ",
- "usm_ddp_drops ",
- "rdma_no_rqe_mod_defer ",
- "rdma_no_rqe_pkt_defer ",
- "tp_err_ofld_no_neigh ",
- "tp_err_ofld_cong_defer ",
"write_coal_success ",
"write_coal_fail ",
};
-static char channel_stats_strings[][ETH_GSTRING_LEN] = {
- "--------Channel--------- ",
- "tp_cpl_requests ",
- "tp_cpl_responses ",
- "tp_mac_in_errs ",
- "tp_hdr_in_errs ",
- "tp_tcp_in_errs ",
- "tp_tcp6_in_errs ",
- "tp_tnl_cong_drops ",
- "tp_tnl_tx_drops ",
- "tp_ofld_vlan_drops ",
- "tp_ofld_chan_drops ",
- "fcoe_octets_ddp ",
- "fcoe_frames_ddp ",
- "fcoe_frames_drop ",
-};
-
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
@@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"bg3_frames_trunc ",
};
+static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
+};
+
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings) +
ARRAY_SIZE(adapter_stats_strings) +
- ARRAY_SIZE(channel_stats_strings) +
ARRAY_SIZE(loopback_stats_strings);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(cxgb4_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
FW_HDR_FW_VER_MINOR_G(exprom_vers),
FW_HDR_FW_VER_MICRO_G(exprom_vers),
FW_HDR_FW_VER_BUILD_G(exprom_vers));
+ info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, adapter_stats_strings,
sizeof(adapter_stats_strings));
data += sizeof(adapter_stats_strings);
- memcpy(data, channel_stats_strings,
- sizeof(channel_stats_strings));
- data += sizeof(channel_stats_strings);
memcpy(data, loopback_stats_strings,
sizeof(loopback_stats_strings));
+ } else if (stringset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, cxgb4_priv_flags_strings,
+ sizeof(cxgb4_priv_flags_strings));
}
}
@@ -270,41 +244,10 @@ struct adapter_stats {
u64 db_drop;
u64 db_full;
u64 db_empty;
- u64 tcp_v4_out_rsts;
- u64 tcp_v4_in_segs;
- u64 tcp_v4_out_segs;
- u64 tcp_v4_retrans_segs;
- u64 tcp_v6_out_rsts;
- u64 tcp_v6_in_segs;
- u64 tcp_v6_out_segs;
- u64 tcp_v6_retrans_segs;
- u64 frames;
- u64 octets;
- u64 drops;
- u64 rqe_dfr_mod;
- u64 rqe_dfr_pkt;
- u64 ofld_no_neigh;
- u64 ofld_cong_defer;
u64 wc_success;
u64 wc_fail;
};
-struct channel_stats {
- u64 cpl_req;
- u64 cpl_rsp;
- u64 mac_in_errs;
- u64 hdr_in_errs;
- u64 tcp_in_errs;
- u64 tcp6_in_errs;
- u64 tnl_cong_drops;
- u64 tnl_tx_drops;
- u64 ofld_vlan_drops;
- u64 ofld_chan_drops;
- u64 octets_ddp;
- u64 frames_ddp;
- u64 frames_drop;
-};
-
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap,
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
- struct tp_tcp_stats v4, v6;
- struct tp_rdma_stats rdma_stats;
- struct tp_err_stats err_stats;
- struct tp_usm_stats usm_stats;
u64 val1, val2;
memset(s, 0, sizeof(*s));
- spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, &v4, &v6, false);
- t4_tp_get_rdma_stats(adap, &rdma_stats, false);
- t4_get_usm_stats(adap, &usm_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- spin_unlock(&adap->stats_lock);
-
s->db_drop = adap->db_stats.db_drop;
s->db_full = adap->db_stats.db_full;
s->db_empty = adap->db_stats.db_empty;
- s->tcp_v4_out_rsts = v4.tcp_out_rsts;
- s->tcp_v4_in_segs = v4.tcp_in_segs;
- s->tcp_v4_out_segs = v4.tcp_out_segs;
- s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
- s->tcp_v6_out_rsts = v6.tcp_out_rsts;
- s->tcp_v6_in_segs = v6.tcp_in_segs;
- s->tcp_v6_out_segs = v6.tcp_out_segs;
- s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
-
- if (is_offload(adap)) {
- s->frames = usm_stats.frames;
- s->octets = usm_stats.octets;
- s->drops = usm_stats.drops;
- s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
- s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
- }
-
- s->ofld_no_neigh = err_stats.ofld_no_neigh;
- s->ofld_cong_defer = err_stats.ofld_cong_defer;
-
if (!is_t4(adap->params.chip)) {
int v;
@@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
}
}
-static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
- u8 i)
-{
- struct tp_cpl_stats cpl_stats;
- struct tp_err_stats err_stats;
- struct tp_fcoe_stats fcoe_stats;
-
- memset(s, 0, sizeof(*s));
-
- spin_lock(&adap->stats_lock);
- t4_tp_get_cpl_stats(adap, &cpl_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
- spin_unlock(&adap->stats_lock);
-
- s->cpl_req = cpl_stats.req[i];
- s->cpl_rsp = cpl_stats.rsp[i];
- s->mac_in_errs = err_stats.mac_in_errs[i];
- s->hdr_in_errs = err_stats.hdr_in_errs[i];
- s->tcp_in_errs = err_stats.tcp_in_errs[i];
- s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
- s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
- s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
- s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
- s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
- s->octets_ddp = fcoe_stats.octets_ddp;
- s->frames_ddp = fcoe_stats.frames_ddp;
- s->frames_drop = fcoe_stats.frames_drop;
-}
-
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
@@ -429,11 +311,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
data += sizeof(struct adapter_stats) / sizeof(u64);
*data++ = (u64)pi->port_id;
- collect_channel_stats(adapter, (struct channel_stats *)data,
- pi->port_id);
- data += sizeof(struct channel_stats) / sizeof(u64);
-
- *data++ = (u64)pi->port_id;
memset(&s, 0, sizeof(s));
t4_get_lb_stats(adapter, pi->port_id, &s);
@@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
- if (netif_carrier_ok(dev)) {
- base->speed = pi->link_cfg.speed;
- base->duplex = DUPLEX_FULL;
- } else {
- base->speed = SPEED_UNKNOWN;
- base->duplex = DUPLEX_UNKNOWN;
- }
+ base->speed = (netif_carrier_ok(dev)
+ ? pi->link_cfg.speed
+ : SPEED_UNKNOWN);
+ base->duplex = DUPLEX_FULL;
if (pi->link_cfg.fc & PAUSE_RX) {
if (pi->link_cfg.fc & PAUSE_TX) {
@@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
offset, len, &data[eprom->len - len]);
}
+static u32 cxgb4_get_priv_flags(struct net_device *netdev)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ return (adapter->eth_flags | pi->eth_flags);
+}
+
+/**
+ * set_flags - set/unset specified flags if passed in new_flags
+ * @cur_flags: pointer to current flags
+ * @new_flags: new incoming flags
+ * @flags: set of flags to set/unset
+ */
+static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
+{
+ *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
+}
+
+static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
+ set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
+
+ return 0;
+}
+
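set_flags() is a masked read-modify-write: only bits inside `flags` are taken from new_flags, everything else in *cur_flags survives. That is what lets one ethtool private-flags word be split between adapter-wide (PRIV_FLAGS_ADAP) and per-port (PRIV_FLAGS_PORT) state. A worked example with made-up bit patterns:

/* Worked example (bit patterns hypothetical):
 *   *cur_flags = 0b1010, new_flags = 0b0101, flags = 0b0011
 *   (*cur_flags & ~flags) = 0b1000   bits outside the mask survive
 *   (new_flags  &  flags) = 0b0001   masked bits come from the caller
 *   result                = 0b1001
 * With PRIV_FLAGS_ADAP currently 0, the adapter update is a no-op and
 * port_tx_vm_wr lands only in pi->eth_flags.
 */
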
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_dump_data = get_dump_data,
.get_module_info = cxgb4_get_module_info,
.get_module_eeprom = cxgb4_get_module_eeprom,
+ .get_priv_flags = cxgb4_get_priv_flags,
+ .set_priv_flags = cxgb4_set_priv_flags,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a8926e97935e..961e3087d1d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -267,7 +267,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
}
}
-static int cxgb4_dcb_enabled(const struct net_device *dev)
+int cxgb4_dcb_enabled(const struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
dev = q->adap->port[q->adap->chan_map[port]];
dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
- ? !!(pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS_F)
- : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
- FW_PORT_CMD_DCBXDIS32_F));
+ ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
+ : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
+ & FW_PORT_CMD_DCBXDIS32_F));
state_input = (dcbxdis
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
@@ -924,12 +923,14 @@ static int setup_sge_queues(struct adapter *adap)
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
freeout:
+ dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
t4_free_sge_resources(adap);
return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
int txq;
@@ -971,7 +972,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
@@ -3016,7 +3017,7 @@ static int cxgb_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
- pi, dev);
+ pi, dev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
return 0;
@@ -3219,7 +3220,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
- .ndo_start_xmit = t4_eth_xmit,
+ .ndo_start_xmit = t4_start_xmit,
.ndo_select_queue = cxgb_select_queue,
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
@@ -3538,6 +3539,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
u32 v;
int ret;
+	/* Now that we've successfully configured and initialized the adapter,
+	 * we can ask the Firmware what resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ return ret;
+ }
+
/* get device capabilities */
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
@@ -4172,32 +4183,6 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
- /*
- * Grab VPD parameters. This should be done after we establish a
- * connection to the firmware since some of the VPD parameters
- * (notably the Core Clock frequency) are retrieved via requests to
- * the firmware. On the other hand, we need these fairly early on
- * so we do this right after getting ahold of the firmware.
- */
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
-
- /*
- * Find out what ports are available to us. Note that we need to do
- * this before calling adap_init0_no_config() since it needs nports
- * and portvec ...
- */
- v =
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
- if (ret < 0)
- goto bye;
-
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
-
/* If the firmware is initialized already, emit a simple note to that
* effect. Otherwise, it's time to try initializing the adapter.
*/
@@ -4248,6 +4233,45 @@ static int adap_init0(struct adapter *adap)
}
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ goto bye;
+ }
+
+ /* Grab VPD parameters. This should be done after we establish a
+ * connection to the firmware since some of the VPD parameters
+ * (notably the Core Clock frequency) are retrieved via requests to
+ * the firmware. On the other hand, we need these fairly early on
+ * so we do this right after getting ahold of the firmware.
+ *
+ * We need to do this after initializing the adapter because someone
+ * could have FLASHed a new VPD which won't be read by the firmware
+ * until we do the RESET ...
+ */
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+
+ /* Find out what ports are available to us. Note that we need to do
+ * this before calling adap_init0_no_config() since it needs nports
+ * and portvec ...
+ */
+ v =
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0)
+ goto bye;
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
/* Give the SGE code a chance to pull in anything that it needs ...
* Note that this must be called after we retrieve our VPD parameters
* in order to know how to convert core ticks to seconds, etc.
@@ -4799,10 +4823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
-static void cfg_queues(struct adapter *adap)
+static int cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int i = 0, n10g = 0, qidx = 0;
+ int i, n10g = 0, qidx = 0;
+ int niqflint, neq, avail_eth_qsets;
+ int max_eth_qsets = 32;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
@@ -4814,16 +4840,46 @@ static void cfg_queues(struct adapter *adap)
adap->params.crypto = 0;
}
- n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ /* Calculate the number of Ethernet Queue Sets available based on
+ * resources provisioned for us. We always have an Asynchronous
+ * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
+ * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
+ * Ingress Queue. Meanwhile, we need two Egress Queues for each
+ * Queue Set: one for the Free List and one for the Ethernet TX Queue.
+ *
+ * Note that we should also take into account all of the various
+ * Offload Queues. But, in any situation where we're operating in
+ * a Resource Constrained Provisioning environment, doing any Offload
+ * at all is problematic ...
+ */
+ niqflint = adap->params.pfres.niqflint - 1;
+ if (!(adap->flags & USING_MSIX))
+ niqflint--;
+ neq = adap->params.pfres.neq / 2;
+ avail_eth_qsets = min(niqflint, neq);
+
+ if (avail_eth_qsets > max_eth_qsets)
+ avail_eth_qsets = max_eth_qsets;
+
+ if (avail_eth_qsets < adap->params.nports) {
+ dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
+ avail_eth_qsets, adap->params.nports);
+ return -ENOMEM;
+ }
+
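A worked budget under hypothetical provisioning makes the arithmetic concrete (the pfres numbers below are invented):

/* Hypothetical provisioning:
 *   pfres.niqflint = 66, MSI-X in use -> niqflint = 66 - 1 = 65
 *     (one ingress queue reserved for the FW event queue; MSI/INTx
 *      would cost one more for the forwarded-interrupt queue)
 *   pfres.neq = 130                   -> neq = 130 / 2 = 65
 *     (each queue set needs two egress queues: free list + ETH TXQ)
 *   avail_eth_qsets = min(65, 65) = 65, then clamped to 32.
 */
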
+ /* Count the number of 10Gb/s or better ports */
+ for_each_port(adap, i)
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
- if (adap->params.nports * 8 > MAX_ETH_QSETS) {
- dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
- MAX_ETH_QSETS, adap->params.nports * 8);
- BUG_ON(1);
+ if (adap->params.nports * 8 > avail_eth_qsets) {
+ dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
+ avail_eth_qsets, adap->params.nports * 8);
+ return -ENOMEM;
}
for_each_port(adap, i) {
@@ -4839,7 +4895,7 @@ static void cfg_queues(struct adapter *adap)
* per 10G port.
*/
if (n10g)
- q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
@@ -4890,6 +4946,8 @@ static void cfg_queues(struct adapter *adap)
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
+
+ return 0;
}
/*
@@ -5086,17 +5144,9 @@ static void print_port_info(const struct net_device *dev)
{
char buf[80];
char *bufp = buf;
- const char *spd = "";
const struct port_info *pi = netdev_priv(dev);
const struct adapter *adap = pi->adapter;
- if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
- spd = " 2.5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
- spd = " 5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
- spd = " 8 GT/s";
-
if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
@@ -5600,6 +5650,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_CHELSIO_T4_DCB
netdev->dcbnl_ops = &cxgb4_dcb_ops;
cxgb4_dcb_state_init(netdev);
+ cxgb4_dcb_version_init(netdev);
#endif
cxgb4_set_ethtool_ops(netdev);
}
@@ -5630,10 +5681,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ if (!(adapter->flags & FW_OK))
+ goto fw_attach_fail;
+
/* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
- cfg_queues(adapter);
+ err = cfg_queues(adapter);
+ if (err)
+ goto out_free_dev;
adapter->smt = t4_init_smt();
if (!adapter->smt) {
@@ -5705,7 +5761,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
u32 hash_base, hash_reg;
- if (chip <= CHELSIO_T5) {
+ if (chip_ver <= CHELSIO_T5) {
hash_reg = LE_DB_TID_HASHBASE_A;
hash_base = t4_read_reg(adapter, hash_reg);
adapter->tids.hash_base = hash_base / 4;
@@ -5740,6 +5796,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_dev;
}
+fw_attach_fail:
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 3ddd2c4acf68..623f73dd7738 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap)
{
int ret;
+ if (adap->tc_flower_initialized)
+ return -EEXIST;
+
adap->flower_ht_params = cxgb4_tc_flower_ht_params;
ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
if (ret)
@@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap)
INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+ adap->tc_flower_initialized = true;
return 0;
}
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
+ if (!adap->tc_flower_initialized)
+ return;
+
if (adap->flower_stats_timer.function)
del_timer_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
+ adap->tc_flower_initialized = false;
}
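
The new tc_flower_initialized flag makes this init/cleanup pair idempotent, so a probe error path or a repeated teardown can no longer double-initialize the rhashtable or destroy it twice. The general shape of the guard, with illustrative names:

struct subsys {
	bool initialized;
	/* ... owned resources ... */
};

static int subsys_init(struct subsys *s)
{
	if (s->initialized)
		return -EEXIST;		/* refuse double init */
	/* ... allocate resources ... */
	s->initialized = true;
	return 0;
}

static void subsys_cleanup(struct subsys *s)
{
	if (!s->initialized)
		return;			/* cleanup is now idempotent */
	/* ... release resources ... */
	s->initialized = false;
}
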
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 77c2c538b1fd..301c4df8a566 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -231,6 +231,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
+ /* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return t4_ofld_send(adap, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 9148abb7994c..7fc656680299 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap)
struct port_info *pi = netdev2pinfo(adap->port[j]);
s = pi->sched_tbl;
+ if (!s)
+ continue;
+
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 395e2a0e8d7f..6807bc3a44fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}
/**
- * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
u64 cntrl, *end, *sgl;
@@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+/* Constants ... */
+enum {
+ /* Egress Queue sizes, producer and consumer indices are all in units
+ * of Egress Context Units bytes. Note that as far as the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+};
+
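The unit arithmetic, assuming SGE_EQ_IDXSIZE is 64 bytes (its usual value on these adapters; the constant is not defined in this diff):

/* With SGE_EQ_IDXSIZE == 64:
 *   EQ_UNIT         = 64 bytes
 *   FL_PER_EQ_UNIT  = 64 / sizeof(__be64) = 8 free-list addresses
 *   TXD_PER_EQ_UNIT = 8 flits of TX descriptor per unit
 */
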
+/**
+ * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
+{
+ /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware. It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (t4vf_is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
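A worked count for a hypothetical non-GSO skb with three page fragments (the header sizes are the customary 32-byte VM work request and 16-byte CPL, assumed here rather than read from this diff):

/* Hypothetical non-GSO skb, nr_frags = 3:
 *   SGL part:    sgl_len(3 + 1) flits for the head plus 3 fragments
 *   header part: (32 + 16) / 8 = 6 flits of WR + cpl_tx_pkt_core
 * flits_to_desc() then rounds the total up to whole TX descriptors,
 * which is what 'ndesc' below charges against the queue.
 */
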
+/**
+ * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct skb_shared_info *ssi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ int qidx, credits, max_pkt_len;
+ struct cpl_tx_pkt_core *cpl;
+ const struct port_info *pi;
+ unsigned int flits, ndesc;
+ struct sge_eth_txq *txq;
+ struct adapter *adapter;
+ u64 cntrl, *end;
+ u32 wr_mid;
+ const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci);
+
+ /* The chip minimum packet length is 10 octets but the firmware
+ * command that we are using requires that we copy the Ethernet header
+ * (including the VLAN tag) into the header so we reject anything
+ * smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
+ /* Figure out which TX Queue we're going to use. */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ WARN_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /* Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+
+ /* Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = t4vf_calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /* Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!t4vf_is_eth_imm(skb) &&
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /* We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ /* Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+ wr->r3[0] = cpu_to_be32(0);
+ wr->r3[1] = cpu_to_be32(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /* If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*lso) +
+ sizeof(*cpl)));
+ /* Fill in the LSO CPL message. */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ if (is_t4(adapter->params.chip))
+ lso->len = cpu_to_be32(skb->len);
+ else
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = (t4vf_is_eth_imm(skb)
+ ? skb->len + sizeof(*cpl)
+ : sizeof(*cpl));
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
+ txq->tx_cso++;
+ } else {
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ }
+ }
+
+ /* If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ /* Fill in the TX Packet CPL message header. */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ /* Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (t4vf_is_eth_imm(skb)) {
+ /* In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_consume_skb_any(skb);
+ } else {
+ /* Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+ * This is good for performance but means that we rely on new
+ * TX packets arriving to run the destructors of completed
+ * packets, which open up space in their sockets' send queues.
+ * Sometimes we do not get such new packets, causing TX to
+ * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+ * after the packet is queued but before it's DMAd. A con is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data, the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /* If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /* Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+
+ cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /* An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
+ return cxgb4_vf_eth_xmit(skb, dev);
+
+ return cxgb4_eth_xmit(skb, dev);
+}
+
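
As an aside on the xmit path above, here is a minimal, userspace-compilable sketch of its producer-index and stop-threshold bookkeeping; RING_SIZE, STOP_THRES and ring_claim() are illustrative names, not driver API.

#include <stdbool.h>

#define RING_SIZE  1024
#define STOP_THRES 34   /* assumed stand-in for ETHTXQ_STOP_THRES */

struct ring {
	unsigned int pidx;   /* producer index, as advanced by txq_advance() */
	unsigned int in_use; /* descriptors currently owned by hardware */
	bool stopped;
};

/* Claim ndesc descriptors; returns false when the caller must stop the
 * queue and wait for an egress-queue update to open up space.
 */
bool ring_claim(struct ring *r, unsigned int ndesc)
{
	if (RING_SIZE - r->in_use < ndesc)
		return false;                   /* ring full while queue awake */
	r->in_use += ndesc;
	r->pidx += ndesc;
	if (r->pidx >= RING_SIZE)               /* wrap at the ring end */
		r->pidx -= RING_SIZE;
	if (RING_SIZE - r->in_use < STOP_THRES) /* below the stop threshold */
		r->stopped = true;              /* request an EQ status update */
	return true;
}
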
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
@@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
+ FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
+ : FW_IQ_IQTYPE_OFLD));
if (fl) {
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3720c3e11ebb..5fe5d16dee72 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves the configured resource limits and capabilities for the
+ * physical function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ int v;
+ u32 word;
+
+ /* Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adapter->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /* Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+ pfres->niq = FW_PFVF_CMD_NIQ_G(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = FW_PFVF_CMD_NEQ_G(word);
+ pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ pfres->tc = FW_PFVF_CMD_TC_G(word);
+ pfres->nvi = FW_PFVF_CMD_NVI_G(word);
+ pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+ pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+ pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
+
+ return 0;
+}
+
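
For readers new to these headers, a hedged, standalone illustration of the _S/_M/_V/_G accessor convention that t4_get_pfres() relies on (shift, post-shift mask, value placement, field extraction); the EXAMPLE field is invented for the demo.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_S    8
#define EXAMPLE_M    0xffU
#define EXAMPLE_V(x) ((x) << EXAMPLE_S)
#define EXAMPLE_G(x) (((x) >> EXAMPLE_S) & EXAMPLE_M)

int main(void)
{
	uint32_t word = EXAMPLE_V(0x5aU) | 0x3; /* pack the field plus low bits */

	printf("field = 0x%x\n", EXAMPLE_G(word)); /* prints field = 0x5a */
	return 0;
}
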
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -7453,10 +7504,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* Fall through */
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* Fall through */
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* Fall through */
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
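
A compact, hypothetical reconstruction of the descending fall-through ladder annotated above: each case copies one more MAC address and intentionally drops into the next; copy_macs() and the offsets are illustrative, not the driver's code.

#include <string.h>

void copy_macs(unsigned char *dst, const unsigned char (*src)[6], int nmac)
{
	switch (nmac) {
	case 4:
		memcpy(dst + 18, src[3], 6);
		/* fall through */
	case 3:
		memcpy(dst + 12, src[2], 6);
		/* fall through */
	case 2:
		memcpy(dst + 6, src[1], 6);
		/* fall through */
	case 1:
		memcpy(dst, src[0], 6);
	}
}
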
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index c7f8d0441278..60df66f4d21c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -188,6 +188,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 6b55aa2eb2a5..eb222d40ddbf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1502,6 +1502,25 @@
#define TP_MIB_DATA_A 0x7e54
#define TP_INT_CAUSE_A 0x7e74
+#define TP_FLM_FREE_PS_CNT_A 0x7e80
+#define TP_FLM_FREE_RX_CNT_A 0x7e84
+
+#define FREEPSTRUCTCOUNT_S 0
+#define FREEPSTRUCTCOUNT_M 0x1fffffU
+#define FREEPSTRUCTCOUNT_G(x) (((x) >> FREEPSTRUCTCOUNT_S) & FREEPSTRUCTCOUNT_M)
+
+#define FREERXPAGECOUNT_S 0
+#define FREERXPAGECOUNT_M 0x1fffffU
+#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S)
+#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M)
+
+#define TP_FLM_FREE_TX_CNT_A 0x7e88
+
+#define FREETXPAGECOUNT_S 0
+#define FREETXPAGECOUNT_M 0x1fffffU
+#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S)
+#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M)
+
#define FLMTXFLSTEMPTY_S 30
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
@@ -1683,6 +1702,16 @@
#define ULP_TX_LA_RDPTR_0_A 0x8ec0
#define ULP_TX_LA_RDDATA_0_A 0x8ec4
#define ULP_TX_LA_WRPTR_0_A 0x8ec8
+#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70
+
+#define ULP_TX_ASIC_DEBUG_0_A 0x8f74
+#define ULP_TX_ASIC_DEBUG_1_A 0x8f78
+#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c
+#define ULP_TX_ASIC_DEBUG_3_A 0x8f80
+#define ULP_TX_ASIC_DEBUG_4_A 0x8f84
+
+/* registers for module PM_RX */
+#define PM_RX_BASE_ADDR 0x8fc0
#define PMRX_E_PCMD_PAR_ERROR_S 0
#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f1967cf6d43c..5dc6c4154af8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1472,6 +1472,12 @@ enum fw_iq_type {
FW_IQ_TYPE_NO_FL_INT_CAP
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_OTHER,
+ FW_IQ_IQTYPE_NIC,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -1586,6 +1592,12 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+#define FW_IQ_CMD_IQTYPE_S 24
+#define FW_IQ_CMD_IQTYPE_M 0x3
+#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S)
+#define FW_IQ_CMD_IQTYPE_G(x) \
+ (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)
+
#define FW_IQ_CMD_FL0CNGCHMAP_S 20
#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 4eb15ceddca3..a844296135b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x13
-#define T4FW_VERSION_MICRO 0x01
+#define T4FW_VERSION_MINOR 0x14
+#define T4FW_VERSION_MICRO 0x08
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x13
-#define T5FW_VERSION_MICRO 0x01
+#define T5FW_VERSION_MINOR 0x14
+#define T5FW_VERSION_MICRO 0x08
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x13
-#define T6FW_VERSION_MICRO 0x01
+#define T6FW_VERSION_MINOR 0x14
+#define T6FW_VERSION_MICRO 0x08
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 0ed161642371..74849be5f004 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -412,12 +412,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
ppmax * (sizeof(struct cxgbi_ppod_data)) +
ppod_bmap_size * sizeof(unsigned long);
- ppm = vmalloc(alloc_sz);
+ ppm = vzalloc(alloc_sz);
if (!ppm)
goto release_ppm_pool;
- memset(ppm, 0, alloc_sz);
-
ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index e9db811df59c..901e44b0b795 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1071,7 +1071,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
unsigned int num_bars)
{
if (!vdev) {
- vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index f8aa326d1d58..a3e7b003ada1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -35,7 +35,7 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!rq->bufs[i])
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 090cc65658a3..eb75891974df 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -35,7 +35,7 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!wq->bufs[i])
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6d7404f66f84..1c9ad3630c77 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -46,6 +46,11 @@
#define DRV_NAME "gmac-gemini"
#define DRV_VERSION "1.0"
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
#define HSIZE_8 0x00
#define HSIZE_16 0x01
#define HSIZE_32 0x02
@@ -146,6 +151,7 @@ struct gemini_ethernet {
void __iomem *base;
struct gemini_ethernet_port *port0;
struct gemini_ethernet_port *port1;
+ bool initialized;
spinlock_t irq_lock; /* Locks IRQ-related registers */
unsigned int freeq_order;
@@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev)
status.bits.speed = GMAC_SPEED_1000;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
- netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
+ phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
+ phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
+ phydev_name(phydev));
break;
default:
- netdev_warn(netdev, "Not supported PHY speed (%d)\n",
- phydev->speed);
+ netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n",
+ phydev->speed, phydev_name(phydev));
}
if (phydev->duplex == DUPLEX_FULL) {
@@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev)
return -ENODEV;
netdev->phydev = phy;
- netdev_info(netdev, "connected to PHY \"%s\"\n",
- phydev_name(phy));
- phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
- (unsigned long)phy->phy_id,
- phy_modes(phy->interface));
-
phy->supported &= PHY_GBIT_FEATURES;
phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
phy->advertising = phy->supported;
@@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev)
/* set PHY interface type */
switch (phy->interface) {
case PHY_INTERFACE_MODE_MII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_MII;
- netdev_info(netdev, "connect to MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_GMII;
- netdev_info(netdev, "connect to GMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
- dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ netdev_dbg(netdev,
+ "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII\n");
break;
default:
netdev_err(netdev, "Unsupported MII interface\n");
@@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev)
}
writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ if (netif_msg_link(port))
+ phy_attached_info(phy);
+
return 0;
}
-static int gmac_pick_rx_max_len(int max_l3_len)
-{
- /* index = CONFIG_MAXLEN_XXX values */
- static const int max_len[8] = {
- 1536, 1518, 1522, 1542,
- 9212, 10236, 1518, 1518
- };
- int i, n = 5;
+/* The maximum frame length is not logically enumerated in the
+ * hardware, so we do a table lookup to find the applicable max
+ * frame length.
+ */
+struct gmac_max_framelen {
+ unsigned int max_l3_len;
+ u8 val;
+};
+
+static const struct gmac_max_framelen gmac_maxlens[] = {
+ {
+ .max_l3_len = 1518,
+ .val = CONFIG0_MAXLEN_1518,
+ },
+ {
+ .max_l3_len = 1522,
+ .val = CONFIG0_MAXLEN_1522,
+ },
+ {
+ .max_l3_len = 1536,
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+ .max_l3_len = 1542,
+ .val = CONFIG0_MAXLEN_1542,
+ },
+ {
+ .max_l3_len = 9212,
+ .val = CONFIG0_MAXLEN_9k,
+ },
+ {
+ .max_l3_len = 10236,
+ .val = CONFIG0_MAXLEN_10k,
+ },
+};
- max_l3_len += ETH_HLEN + VLAN_HLEN;
+static int gmac_pick_rx_max_len(unsigned int max_l3_len)
+{
+ const struct gmac_max_framelen *maxlen;
+ int maxtot;
+ int i;
- if (max_l3_len > max_len[n])
- return -1;
+ maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
- for (i = 0; i < 5; i++) {
- if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
- n = i;
+ for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) {
+ maxlen = &gmac_maxlens[i];
+ if (maxtot <= maxlen->max_l3_len)
+ return maxlen->val;
}
- return n;
+ return -1;
}
static int gmac_init(struct net_device *netdev)
@@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable)
unsigned long flags;
u32 val, mask;
- netdev_info(netdev, "%s device %d %s\n", __func__,
- netdev->dev_id, enable ? "enable" : "disable");
+ netdev_dbg(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
spin_lock_irqsave(&geth->irq_lock, flags);
mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
@@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev)
phy_start(netdev->phydev);
err = geth_resize_freeq(port);
- if (err) {
+ /* It's fine if it's just busy, the other port has set up
+ * the freeq in that case.
+ */
+ if (err && (err != -EBUSY)) {
netdev_err(netdev, "could not resize freeq\n");
goto err_stop_phy;
}
@@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev)
HRTIMER_MODE_REL);
port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
- netdev_info(netdev, "opened\n");
+ netdev_dbg(netdev, "opened\n");
return 0;
@@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port)
static void gemini_ethernet_init(struct gemini_ethernet *geth)
{
+ /* Only do this once both ports are online */
+ if (geth->initialized)
+ return;
+ if (geth->port0 && geth->port1)
+ geth->initialized = true;
+ else
+ return;
+
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
@@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->id = id;
port->geth = geth;
port->dev = dev;
+ port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* DMA memory */
dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
geth->port0 = port;
else
geth->port1 = port;
+
+ /* This is done only once, after both ports are up and reset */
+ gemini_ethernet_init(geth);
+
platform_set_drvdata(pdev, port);
/* Set up and register the netdev */
@@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+ /* We can handle jumbo frames up to 10236 bytes, so let's accept
+ * payloads of 10236 bytes minus the VLAN and Ethernet headers.
+ */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
@@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
}
gmac_write_mac_address(netdev);
@@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
spin_lock_init(&geth->irq_lock);
spin_lock_init(&geth->freeq_lock);
- gemini_ethernet_init(geth);
/* The children will use this */
platform_set_drvdata(pdev, geth);
@@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
- gemini_ethernet_init(geth);
geth_cleanup_freeq(geth);
+ geth->initialized = false;
return 0;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index a31b4df3e7ff..66535d1653f6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3204,6 +3204,8 @@ srom_map_media(struct net_device *dev)
case SROM_10BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_10BASET:
if (lp->params.fdx && !lp->fdx) return -1;
if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
@@ -3224,6 +3226,8 @@ srom_map_media(struct net_device *dev)
case SROM_100BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_100BASET:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
@@ -3236,6 +3240,8 @@ srom_map_media(struct net_device *dev)
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 00d02a0967d0..3e3e08698876 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -923,6 +923,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
data->phy_id = 1;
else
return -ENODEV;
+ /* Fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index b4853ec9de8d..8cf794edd3c3 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -1,7 +1,7 @@
config BE2NET
tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI
- ---help---
+ help
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
@@ -10,6 +10,42 @@ config BE2NET_HWMON
depends on BE2NET && HWMON
depends on !(BE2NET=y && HWMON=m)
default y
- ---help---
+ help
Say Y here if you want to expose thermal sensor data on
be2net network adapter.
+
+config BE2NET_BE2
+ bool "Support for BE2 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE2
+ chipsets. (e.g. OneConnect OCe10xxx)
+
+config BE2NET_BE3
+ bool "Support for BE3 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE3
+ chipsets. (e.g. OneConnect OCe11xxx)
+
+config BE2NET_LANCER
+ bool "Support for Lancer chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Lancer
+ chipsets. (e.g. LightPulse LPe12xxx)
+
+config BE2NET_SKYHAWK
+ bool "Support for Skyhawk chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Skyhawk
+ chipsets. (e.g. OneConnect OCe14xxx)
+
+comment "WARNING: be2net is useless without any enabled chip"
+ depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
+ BE2NET_SKYHAWK=n && BE2NET
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 382891f81e09..58bcee8f0a58 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.4.0.0"
+#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q)
struct be_eq_obj {
struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u32 min_eqd; /* in usecs */
- u32 max_eqd; /* in usecs */
- u32 eqd; /* configured val when aic is off */
- u32 cur_eqd; /* in usecs */
+ struct be_adapter *adapter;
+ struct napi_struct napi;
u8 idx; /* array index */
u8 msix_idx;
u16 spurious_intr;
- struct napi_struct napi;
- struct be_adapter *adapter;
cpumask_var_t affinity_mask;
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BE_EQ_IDLE 0
-#define BE_EQ_NAPI 1 /* napi owns this EQ */
-#define BE_EQ_POLL 2 /* poll owns this EQ */
-#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
-#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
-#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
-#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
-#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
- unsigned int state;
- spinlock_t lock; /* lock to serialize napi and busy-poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
@@ -238,7 +217,6 @@ struct be_tx_stats {
u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
- ulong tx_jiffies;
u32 tx_stops;
u32 tx_drv_drops; /* pkts dropped by driver */
/* the error counters are described in be_ethtool.c */
@@ -261,9 +239,9 @@ struct be_tx_compl_info {
struct be_tx_obj {
u32 db_offset;
+ struct be_tx_compl_info txcp;
struct be_queue_info q;
struct be_queue_info cq;
- struct be_tx_compl_info txcp;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
@@ -458,10 +436,10 @@ struct be_port_resources {
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
struct rss_info {
- u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
u8 rss_queue[RSS_INDIR_TABLE_LEN];
u8 rss_hkey[RSS_HASH_KEY_LEN];
+ u64 rss_flags;
};
#define BE_INVALID_DIE_TEMP 0xFF
@@ -544,11 +522,13 @@ enum {
};
struct be_error_recovery {
- /* Lancer error recovery variables */
- u8 recovery_retries;
+ union {
+ u8 recovery_retries; /* used for Lancer */
+ u8 recovery_state; /* used for BEx and Skyhawk */
+ };
/* BEx/Skyhawk error recovery variables */
- u8 recovery_state;
+ bool recovery_supported;
u16 ue_to_reset_time; /* Time after UE, to soft reset
* the chip - PF0 only
*/
@@ -556,7 +536,6 @@ struct be_error_recovery {
* of SLIPORT_SEMAPHORE reg
*/
u16 last_err_code;
- bool recovery_supported;
unsigned long probe_time;
unsigned long last_recovery_time;
@@ -773,17 +752,33 @@ static inline u16 be_max_any_irqs(struct be_adapter *adapter)
/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
+#ifdef CONFIG_BE2NET_LANCER
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
+#else
+#define lancer_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
adapter->pdev->device == OC_DEVICE_ID6)
+#else
+#define skyhawk_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_SKYHAWK */
+#ifdef CONFIG_BE2NET_BE3
#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
adapter->pdev->device == OC_DEVICE_ID2)
+#else
+#define BE3_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_BE2
#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
adapter->pdev->device == OC_DEVICE_ID1)
+#else
+#define BE2_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE2 */
#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 7f7e206f95f8..3f6749fc889f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -575,6 +575,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
break;
}
}
+ /* fall through */
case PHY_TYPE_SFP_PLUS_10GB:
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8f755009ff38..74d122616e76 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -47,14 +47,22 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static struct workqueue_struct *be_err_recovery_workq;
static const struct pci_device_id be_dev_ids[] = {
+#ifdef CONFIG_BE2NET_BE2
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+#endif /* CONFIG_BE2NET_BE2 */
+#ifdef CONFIG_BE2NET_BE3
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_LANCER
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
+#endif /* CONFIG_BE2NET_SKYHAWK */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1412,6 +1420,83 @@ drop:
return NETDEV_TX_OK;
}
+static void be_tx_timeout(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_tx_obj *txo;
+ struct sk_buff *skb;
+ struct tcphdr *tcphdr;
+ struct udphdr *udphdr;
+ u32 *entry;
+ int status;
+ int i, j;
+
+ for_all_tx_queues(adapter, txo, i) {
+ dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
+ i, txo->q.head, txo->q.tail,
+ atomic_read(&txo->q.used), txo->q.id);
+
+ entry = txo->q.dma_mem.va;
+ for (j = 0; j < TX_Q_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ entry = txo->cq.dma_mem.va;
+ dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
+ i, txo->cq.head, txo->cq.tail,
+ atomic_read(&txo->cq.used));
+ for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ for (j = 0; j < TX_Q_LEN; j++) {
+ if (txo->sent_skb_list[j]) {
+ skb = txo->sent_skb_list[j];
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+ tcphdr = tcp_hdr(skb);
+ dev_info(dev, "TCP source port %d\n",
+ ntohs(tcphdr->source));
+ dev_info(dev, "TCP dest port %d\n",
+ ntohs(tcphdr->dest));
+ dev_info(dev, "TCP sequence num %d\n",
+ ntohs(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %d\n",
+ ntohs(tcphdr->ack_seq));
+ } else if (ip_hdr(skb)->protocol ==
+ IPPROTO_UDP) {
+ udphdr = udp_hdr(skb);
+ dev_info(dev, "UDP source port %d\n",
+ ntohs(udphdr->source));
+ dev_info(dev, "UDP dest port %d\n",
+ ntohs(udphdr->dest));
+ }
+ dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
+ j, skb, skb->len, skb->protocol);
+ }
+ }
+ }
+
+ if (lancer_chip(adapter)) {
+ dev_info(dev, "Initiating reset due to tx timeout\n");
+ dev_info(dev, "Resetting adapter\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status)
+ dev_err(dev, "Reset failed .. Reboot server\n");
+ }
+}
+
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
@@ -3274,7 +3359,7 @@ void be_detect_error(struct be_adapter *adapter)
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(dev, "Firmware update in progress\n");
+ dev_info(dev, "Reset is in progress\n");
} else {
dev_err(dev, "Error detected in the card\n");
dev_err(dev, "ERR: sliport status 0x%x\n",
@@ -3403,9 +3488,11 @@ static int be_msix_register(struct be_adapter *adapter)
int status, i, vec;
for_all_evt_queues(adapter, eqo, i) {
- sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+ char irq_name[IFNAMSIZ+4];
+
+ snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
vec = be_msix_vec_get(adapter, eqo);
- status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
+ status = request_irq(vec, be_msix, 0, irq_name, eqo);
if (status)
goto err_msix;
@@ -5216,6 +5303,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
+ .ndo_tx_timeout = be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ab02057ac730..65a22cd9aef2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1171,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
params.specific_params.non_rx_params.err_fqid = errq->fqid;
@@ -1213,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
@@ -1610,14 +1610,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
+ struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
const struct qm_sg_entry *sgt;
struct sk_buff **skbh, *skb;
int nr_frags, i;
+ u64 ns;
skbh = (struct sk_buff **)phys_to_virt(addr);
skb = *skbh;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
+ }
+
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags;
dma_unmap_single(dev, addr,
@@ -2087,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
@@ -2228,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
+ struct skb_shared_hwtstamps *shhwtstamps;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
const struct qm_fd *fd = &dq->fd;
@@ -2241,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
void *vaddr;
+ u64 ns;
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2305,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (!skb)
return qman_cb_dqrr_consume;
+ if (priv->rx_tstamp) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ else
+ dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+ }
+
skb->protocol = eth_type_trans(skb, net_dev);
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
@@ -2524,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev)
return err;
}
+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ /* Rx/Tx timestamping can't be disabled separately.
+ * Do nothing here.
+ */
+ priv->tx_tstamp = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->tx_tstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* Rx/Tx timestamping can't be disabled separately.
+ * Do nothing here.
+ */
+ priv->rx_tstamp = false;
+ } else {
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
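
From the other side of the new SIOCSHWTSTAMP path, a hedged userspace sketch of how an application would reach dpaa_ts_ioctl(); the interface name and error handling are deliberately minimal.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_hw_tstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr); /* lands in dpaa_ts_ioctl() */
	close(fd);
	return ret; /* cfg.rx_filter now reports what the driver granted */
}
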
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- if (!net_dev->phydev)
- return -EINVAL;
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ int ret = -EINVAL;
+
+ if (cmd == SIOCGMIIREG) {
+ if (net_dev->phydev)
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ }
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa_ts_ioctl(net_dev, rq, cmd);
+
+ return ret;
}
static const struct net_device_ops dpaa_ops = {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index bd9422082f83..af320f83c742 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -182,6 +182,9 @@ struct dpaa_priv {
struct dpaa_buffer_layout buf_layout[2];
u16 rx_headroom;
+
+ bool tx_tstamp; /* Tx timestamping enabled */
+ bool rx_tstamp; /* Rx timestamping enabled */
};
/* from dpaa_ethtool.c */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 2f933b6b2f4e..3184c8f7cdd0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -32,6 +32,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
+#include <linux/of_platform.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/ptp_qoriq.h>
#include "dpaa_eth.h"
#include "mac.h"
@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int dpaa_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *info)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct device_node *mac_node = dev->of_node;
+ struct device_node *fman_node = NULL, *ptp_node = NULL;
+ struct platform_device *ptp_dev = NULL;
+ struct qoriq_ptp *ptp = NULL;
+
+ info->phc_index = -1;
+
+ fman_node = of_get_parent(mac_node);
+ if (fman_node)
+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+
+ if (ptp_node)
+ ptp_dev = of_find_device_by_node(ptp_node);
+
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.set_link_ksettings = dpaa_set_link_ksettings,
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
+ .get_ts_info = dpaa_get_ts_info,
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c729665107f5..76366c735831 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
@@ -2955,7 +2956,7 @@ static void set_multicast_list(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
- unsigned int i, bit, data, crc, tmp;
+ unsigned int crc, tmp;
unsigned char hash;
unsigned int hash_high = 0, hash_low = 0;
@@ -2983,15 +2984,7 @@ static void set_multicast_list(struct net_device *ndev)
/* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
- crc = 0xffffffff;
-
- for (i = 0; i < ndev->addr_len; i++) {
- data = ha->addr[i];
- for (bit = 0; bit < 8; bit++, data >>= 1) {
- crc = (crc >> 1) ^
- (((crc ^ data) & 1) ? CRC32_POLY : 0);
- }
- }
+ crc = ether_crc_le(ndev->addr_len, ha->addr);
/* only upper 6 bits (FEC_HASH_BITS) are used
* which point to specific bit in the hash registers
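
For reference, a standalone reconstruction of the open-coded loop this hunk replaces with ether_crc_le(): a little-endian CRC-32 (polynomial 0xedb88320, initial value 0xffffffff, no final xor) whose top 6 bits select the hash-register bit.

#include <stdint.h>
#include <stddef.h>

uint32_t ether_crc_le_sketch(size_t len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		uint32_t byte = data[i];

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

/* hash = upper 6 bits, per "only upper 6 bits (FEC_HASH_BITS) are used" */
unsigned int fec_hash(const uint8_t mac[6])
{
	return ether_crc_le_sketch(6, mac) >> 26;
}
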
@@ -3136,6 +3129,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
sizeof(struct bufdesc);
unsigned dsize_log2 = __fls(dsize);
+ int ret;
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
@@ -3146,6 +3140,13 @@ static int fec_enet_init(struct net_device *ndev)
fep->tx_align = 0x3;
#endif
+ /* Check mask of the streaming and coherent API */
+ ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
+ return ret;
+ }
+
fec_enet_alloc_queue(ndev);
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 36c2d7d6ee1b..7e892b1cbd3d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
unsigned long flags;
u32 val, tempval;
- int inc;
struct timespec64 ts;
u64 ns;
val = 0;
@@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
fep->pps_channel = DEFAULT_PPS_CHANNEL;
fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
- inc = fep->ptp_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 9530405030a7..c415ac67cb7b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
of_node_put(muram_node);
of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
+ "fman", fman);
if (err < 0) {
dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
__func__, irq, err);
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index bfa02e0014ae..935c317fa696 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -41,6 +41,7 @@
/* Frame queue Context Override */
#define FM_FD_CMD_FCO 0x80000000
#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
/* TX-Port: Unsupported Format */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 57b1e2b47c0a..1ca543ac8f2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -123,11 +123,13 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+#define TCTRL_TTSE 0x00000040
#define TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
#define RCTRL_GHTX 0x00000400
+#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
@@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 rctrl, tctrl;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ rctrl = ioread32be(&regs->rctrl);
+ tctrl = ioread32be(&regs->tctrl);
+
+ if (enable) {
+ rctrl |= RCTRL_RTSE;
+ tctrl |= TCTRL_TTSE;
+ } else {
+ rctrl &= ~RCTRL_RTSE;
+ tctrl &= ~TCTRL_TTSE;
+ }
+
+ iowrite32be(rctrl, &regs->rctrl);
+ iowrite32be(tctrl, &regs->tctrl);
+
+ return 0;
+}
+
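
The same read-modify-write shape recurs in tgec_set_tstamp() below; here is a generic sketch under the assumption of a single enable bit (TS_EN mirrors the 0x00000040 TCTRL_TTSE/RCTRL_RTSE value), with plain pointer dereferences standing in for ioread32be()/iowrite32be().

#include <stdint.h>
#include <stdbool.h>

#define TS_EN 0x00000040u /* e.g. TCTRL_TTSE / RCTRL_RTSE */

void set_tstamp(volatile uint32_t *reg32, bool enable)
{
	uint32_t tmp = *reg32; /* ioread32be() analogue */

	if (enable)
		tmp |= TS_EN;
	else
		tmp &= ~TS_EN;
	*reg32 = tmp; /* iowrite32be() analogue */
}
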
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 1a689adf5a22..5149d96ec2c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 446a97b792e3..bc6eb30aa20f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
+int memac_set_tstamp(struct fman_mac *memac, bool enable)
+{
+ return 0; /* Always enabled. */
+}
+
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b5a50338ed9a..b2c671ec0ce7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac,
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_set_allmulti(struct fman_mac *memac, bool enable);
+int memac_set_tstamp(struct fman_mac *memac, bool enable);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ecbf6187e13a..ee82ee1384eb 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1739,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
}
EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
+{
+ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *tstamp = be64_to_cpu(*(__be64 *)(data +
+ port->buffer_offsets.time_stamp_offset));
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_tstamp);
+
static int fman_port_probe(struct platform_device *of_dev)
{
struct fman_port *port;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index e86ca6a34e4e..9dbb69f40121 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port);
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
+
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 284735d4ebe9..40705938eecc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -44,6 +44,7 @@
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
#define CMD_CFG_PAUSE_IGNORE 0x00000100
#define CMF_CFG_CRC_FWD 0x00000040
@@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+
+ if (enable)
+ tmp |= CMD_CFG_EN_TIMESTAMP;
+ else
+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index cbbd3b422a98..3bfd1062b386 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7b5b95f52c09..a847b9c3b31a 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev)
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index b520cec120ee..824a81a9f350 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -68,6 +68,7 @@ struct mac_device {
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
+ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 1fc27c97e3b2..99fe2c210d0f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
+#include <linux/crc32.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -176,21 +177,10 @@ static void set_multicast_start(struct net_device *dev)
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
- int temp, hash_index, i, j;
+ int temp, hash_index;
u32 crc, csrVal;
- u8 byte, msb;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- byte = mac[i];
- for (j = 0; j < 8; j++) {
- msb = crc >> 31;
- crc <<= 1;
- if (msb ^ (byte & 0x1))
- crc ^= FEC_CRC_POLY;
- byte >>= 1;
- }
- }
+
+ crc = ether_crc(6, mac);
temp = (crc & 0x3f) >> 1;
hash_index = ((temp & 0x01) << 4) |
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8cb98cae0a6f..395a5266ea30 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
u64 class)
{
- unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
unsigned int *local_rqfpr;
unsigned int *local_rqfcr;
@@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
}
priv->cur_filer_idx = l - 1;
- last_rule_idx = l;
/* hash rules */
ethflow_to_filer_rules(priv, ethflow);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 42fca3208c0b..22a817da861e 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ugeth_vdbg("%s: IN", __func__);
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
struct ucc_geth_private *ugeth = netdev_priv(dev);
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
@@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (!skb)
break;
-
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
dev_consume_skb_any(skb);
@@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->confBd[txQ] = bd;
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
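
A runnable plain-C sketch (not driver code) of the byte-queue-limit invariant these ucc_geth hunks establish: bytes reported by netdev_sent_queue() at xmit time are paired with netdev_completed_queue() at reclaim, and both counters reset whenever the queue is opened or closed.

#include <assert.h>

static unsigned long queued, completed;

static void sent_queue(unsigned long bytes)      { queued += bytes; }
static void completed_queue(unsigned long bytes) { completed += bytes; }
static void reset_queue(void)                    { queued = completed = 0; }

int main(void)
{
	reset_queue();         /* ucc_geth_open() */
	sent_queue(1514);      /* ucc_geth_start_xmit() */
	sent_queue(60);
	completed_queue(1574); /* ucc_geth_tx() reclaim */
	assert(queued == completed);
	reset_queue();         /* ucc_geth_close() */
	return 0;
}
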
@@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
@@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev)
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
return 0;
}
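
The ucc_geth hunks above wire the driver into byte queue limits (BQL). The contract, sketched below with illustrative function names (only the netdev_*_queue() helpers are real), is: account bytes when a packet is queued to hardware, pair them with a completion when the hardware is done, and drop stale accounting whenever the ring is (re)initialized:

#include <linux/netdevice.h>

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);	/* bytes handed to hardware */
	/* ... post skb to the TX ring ... */
	return NETDEV_TX_OK;
}

static void sketch_tx_complete(struct net_device *dev, unsigned int pkts,
			       unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes); /* bytes the hardware sent */
}

static void sketch_ring_reset(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* on open/close: forget in-flight bytes */
}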
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index fb1a7251f45d..25152715396b 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -85,10 +85,12 @@ config HNS3
drivers (like ODP) to register with HNAE devices and their associated
operations.
+if HNS3
+
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+ default m
depends on PCI_MSI
- depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,16 +99,15 @@ config HNS3_HCLGE
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
- depends on HNS3 && HNS3_HCLGE && DCB
+ depends on HNS3_HCLGE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
If unsure, say N.
config HNS3_HCLGEVF
- tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
- depends on PCI_MSI
- depends on HNS3
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
depends on HNS3_HCLGE
---help---
This selects the HNS3 VF driver's network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
+ default m
depends on 64BIT && PCI
- depends on HNS3
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
devices and their associated operations.
+endif #HNS3
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 340e28211135..14374a856d30 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
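
random_ether_addr() was by then only a legacy alias for eth_random_addr(), so the rename is mechanical. What the helper does, as a sketch mirroring its well-known behaviour rather than quoting the implementation:

#include <linux/etherdevice.h>
#include <linux/random.h>

static void sketch_eth_random_addr(u8 addr[ETH_ALEN])
{
	get_random_bytes(addr, ETH_ALEN);
	addr[0] &= 0xfe;	/* clear multicast bit */
	addr[0] |= 0x02;	/* set locally-administered bit */
}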
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 25a6c8722eca..c5727003af8c 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,12 +1006,11 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
for (i = 0; i < QUEUE_NUMS; i++) {
size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
- virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
- GFP_KERNEL);
+ virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
if (virt_addr == NULL)
goto error_free_pool;
- memset(virt_addr, 0, size);
priv->pool[i].size = size;
priv->pool[i].desc = virt_addr;
priv->pool[i].phys_addr = phys_addr;
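
The hix5hd2 change folds the explicit memset() into the allocation. A sketch of the equivalence — dma_zalloc_coherent() was, at the time of this series, the zeroing wrapper around dma_alloc_coherent(); the helper name below is illustrative:

#include <linux/dma-mapping.h>

static void *sketch_alloc_desc_pool(struct device *dev, size_t size,
				    dma_addr_t *phys)
{
	/* one call replaces dma_alloc_coherent() + memset(virt, 0, size) */
	return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
}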
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bd68379d2bea..e6aad30e7e69 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -70,8 +70,8 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
return container_of(q, struct ring_pair_cb, q);
}
-struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
- u32 port_id)
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
{
int vfnum_per_port;
int qnum_per_vf;
@@ -329,7 +329,7 @@ static int hns_ae_start(struct hnae_handle *handle)
return 0;
}
-void hns_ae_stop(struct hnae_handle *handle)
+static void hns_ae_stop(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
@@ -357,7 +357,7 @@ static void hns_ae_reset(struct hnae_handle *handle)
}
}
-void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
u32 flag;
@@ -577,8 +577,8 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
-void hns_ae_update_stats(struct hnae_handle *handle,
- struct net_device_stats *net_stats)
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
{
int port;
int idx;
@@ -660,7 +660,7 @@ void hns_ae_update_stats(struct hnae_handle *handle,
net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
-void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
int idx;
struct hns_mac_cb *mac_cb;
@@ -692,8 +692,8 @@ void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
{
int port;
int idx;
@@ -725,7 +725,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
-int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
@@ -771,7 +771,7 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
return ret;
}
-void hns_ae_update_led_status(struct hnae_handle *handle)
+static void hns_ae_update_led_status(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb;
@@ -783,8 +783,8 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
hns_set_led_opt(mac_cb);
}
-int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
- enum hnae_led_state status)
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
{
struct hns_mac_cb *mac_cb;
@@ -795,7 +795,7 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
return hns_cpld_led_set_id(mac_cb, status);
}
-void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
int i;
@@ -820,7 +820,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}
-int hns_ae_get_regs_len(struct hnae_handle *handle)
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
{
u32 total_num;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 74bd260ca02a..5488c6e89f21 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -339,7 +339,7 @@ static void hns_gmac_init(void *mac_drv)
GMAC_TX_WATER_LINE_SHIFT, 8);
}
-void hns_gmac_update_stats(void *mac_drv)
+static void hns_gmac_update_stats(void *mac_drv)
{
struct mac_hw_stats *hw_stats = NULL;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 794516718d9d..1c2326bd76e2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -458,11 +458,6 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
- MAC_MAX_MTU : MAC_MAX_MTU_V2;
-
- if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- max_frm = MAC_MAX_MTU_DBG;
if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
return -EINVAL;
@@ -933,8 +928,9 @@ static int hns_mac_get_mode(phy_interface_t phy_if)
}
}
-u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
- struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+static u8 __iomem *
+hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
{
u8 __iomem *base = dsaf_dev->io_base;
int mac_id = mac_cb->mac_id;
@@ -952,7 +948,8 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
* @mac_cb: mac control block
* return 0 - success, negative - fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
+static int
+hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 0ce07f6eb1e6..ca50c2553a9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -28,7 +28,7 @@
#include "hns_dsaf_rcb.h"
#include "hns_dsaf_misc.h"
-const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
@@ -42,7 +42,7 @@ static const struct acpi_device_id hns_dsaf_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
-int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
u32 desc_num;
@@ -959,7 +959,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
spin_unlock_bh(&dsaf_dev->tcam_lock);
}
-void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+static void
+hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
{
addr[0] = mac_key->high.bits.mac_0;
addr[1] = mac_key->high.bits.mac_1;
@@ -1682,7 +1683,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_mcast_cfg mac_data;
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
struct dsaf_tbl_tcam_data tcam_data;
u8 mc_addr[ETH_ALEN];
int mskid;
@@ -1739,10 +1739,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
/* if exist, add in */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
&mac_data);
-
- tmp_mac_key.high.val =
- le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
}
/* config hardware entry */
@@ -1852,7 +1848,7 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_data tcam_data;
int mskid;
const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
- struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
u8 mc_addr[ETH_ALEN];
@@ -1915,9 +1911,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* read entry */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
- tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
/*del the port*/
if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
mskid = mac_entry->port_num;
@@ -2084,8 +2077,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
* @dsaf_id: dsa fabric id
* @xge_ge_work_mode
*/
-void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
- enum dsaf_port_rate_mode rate_mode)
+static void
+hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
{
u32 port_work_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index acf29633ec79..16294cd3c954 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -340,7 +340,8 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* bit18-19 for com/dfx
* @dereset: false - request reset, true - drop reset
*/
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+static void
+hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
u32 reg_addr;
@@ -362,7 +363,7 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* bit18-19 for com/dfx
* @dereset: false - request reset, true - drop reset
*/
-void
+static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
@@ -370,7 +371,7 @@ hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
msk, dereset);
}
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
@@ -384,7 +385,7 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
}
}
-void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
HNS_ROCE_RESET_FUNC, 0, dereset);
@@ -568,7 +569,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
return phy_if;
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
u32 val = 0;
int ret;
@@ -586,7 +587,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
return 0;
}
-int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
union acpi_object *obj;
union acpi_object obj_args, argv4;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 93e71e27401b..d160d8c9e45b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -73,7 +73,7 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
* comm_index: common index
* return 0 - success, negative - fail
*/
-int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
struct ppe_common_cb *ppe_common;
int ppe_num;
@@ -104,7 +104,8 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
return 0;
}
-void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+static void
+hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
{
dsaf_dev->ppe_common[comm_index] = NULL;
}
@@ -203,9 +204,9 @@ static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
- mdelay(100);
+ msleep(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
switch (dsaf_mode) {
@@ -337,7 +338,7 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
}
}
-void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
{
u32 i;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index e2e28532e4dc..9d76e2e54f9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -705,7 +705,7 @@ void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
+static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
switch (dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
@@ -741,7 +741,7 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 51e7e9f5af49..ba4316910dea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -215,10 +215,10 @@ static void hns_xgmac_init(void *mac_drv)
u32 port = drv->mac_id;
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
- mdelay(100);
+ msleep(100);
hns_xgmac_lf_rf_control_init(drv);
hns_xgmac_exc_irq_en(drv, 0);
@@ -311,7 +311,7 @@ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
}
-void hns_xgmac_update_stats(void *mac_drv)
+static void hns_xgmac_update_stats(void *mac_drv)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5608f807d7ba..9f2b552aee33 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1300,7 +1300,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-void hns_nic_update_stats(struct net_device *netdev)
+static void hns_nic_update_stats(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1582,7 +1582,7 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
-void hns_nic_poll_controller(struct net_device *ndev)
+static void hns_nic_poll_controller(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -1935,7 +1935,7 @@ static int hns_nic_uc_unsync(struct net_device *netdev,
*
* return void
*/
-void hns_set_multicast_list(struct net_device *ndev)
+static void hns_set_multicast_list(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -1957,7 +1957,7 @@ void hns_set_multicast_list(struct net_device *ndev)
}
}
-void hns_nic_set_rx_mode(struct net_device *ndev)
+static void hns_nic_set_rx_mode(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb);
+ return fallback(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 2e14a3ae1d8b..08f3c4743f74 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -307,6 +307,7 @@ static int __lb_setup(struct net_device *ndev,
break;
case MAC_LOOP_PHY_NONE:
ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
+ /* fall through */
case MAC_LOOP_NONE:
if (!ret && h->dev->ops->set_loopback) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
@@ -658,8 +659,8 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* @dev: net device
* @param: ethtool parameter
*/
-void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+static void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
@@ -808,7 +809,8 @@ static int hns_set_coalesce(struct net_device *net_dev,
* @dev: net device
* @ch: channel info.
*/
-void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+static void
+hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
@@ -825,8 +827,8 @@ void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
* @stats: statistics info.
* @data: statistics data.
*/
-void hns_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+static void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
u64 *p = data;
struct hns_nic_priv *priv = netdev_priv(netdev);
@@ -883,7 +885,7 @@ void hns_get_ethtool_stats(struct net_device *netdev,
* @stats: string set ID.
* @data: objects data.
*/
-void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -973,7 +975,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
*
* Return string set count.
*/
-int hns_get_sset_count(struct net_device *netdev, int stringset)
+static int hns_get_sset_count(struct net_device *netdev, int stringset)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1007,7 +1009,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
*
* Return 0 on success, negative on failure.
*/
-int hns_phy_led_set(struct net_device *netdev, int value)
+static int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
struct phy_device *phy_dev = netdev->phydev;
@@ -1029,7 +1031,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
*
* Return 0 on success, negative on failure.
*/
-int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+static int
+hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1103,8 +1106,8 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
* @cmd: ethtool cmd
* @date: register data
*/
-void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
- void *data)
+static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9d79dad2c6aa..fff5be8078ac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -1,14 +1,7 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/spinlock.h>
#include "hnae3.h"
@@ -41,13 +34,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
- hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
default:
break;
@@ -61,16 +54,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
- inited = hnae_get_bit(ae_dev->flag,
- HNAE3_ROCE_CLIENT_INITED_B);
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
break;
default:
break;
@@ -86,7 +79,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
- hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
@@ -95,7 +88,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
ret = ae_dev->ops->init_client_instance(client, ae_dev);
if (ret) {
dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client\n");
+ "fail to instantiate client, ret = %d\n", ret);
return ret;
}
@@ -135,7 +128,8 @@ int hnae3_register_client(struct hnae3_client *client)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed for port\n");
+ "match and instantiation failed for port, ret = %d\n",
+ ret);
}
exit:
@@ -185,11 +179,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_dev->ops = ae_algo->ops;
ret = ae_algo->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
continue;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
/* check the client list for the match with this ae_dev type and
* initialize the matching client instance
@@ -198,7 +193,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
}
@@ -218,7 +214,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -232,7 +228,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_algo->node);
@@ -271,11 +267,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
/* ae_dev init should set flag */
ret = ae_dev->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
goto out_err;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
break;
}
@@ -286,7 +283,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
out_err:
@@ -306,7 +304,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -317,7 +315,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8acb1d116a02..67befff0bfc5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNAE3_H
#define __HNAE3_H
@@ -62,10 +56,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +161,6 @@ struct hnae3_client_ops {
#define HNAE3_CLIENT_NAME_LENGTH 16
struct hnae3_client {
char name[HNAE3_CLIENT_NAME_LENGTH];
- u16 version;
unsigned long state;
enum hnae3_client_type type;
const struct hnae3_client_ops *ops;
@@ -436,7 +429,6 @@ struct hnae3_dcb_ops {
struct hnae3_ae_algo {
const struct hnae3_ae_ops *ops;
struct list_head node;
- char name[HNAE3_CLASS_NAME_SIZE];
const struct pci_device_id *pdev_id_table;
};
@@ -509,17 +501,17 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
};
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= ((val) << (shift)) & (mask); \
} while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
-#define hnae_set_bit(origin, shift, val) \
- hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
- hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+ hnae3_get_field((origin), (0x1 << (shift)), (shift))
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
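
The rename from hnae_* to hnae3_* is mechanical, but the macros' semantics are worth pinning down. A small self-contained check, runnable in userspace (mask, shift, and values chosen arbitrarily):

#include <assert.h>
#include <stdint.h>

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	uint32_t reg = 0xffffffffu;

	/* write 0x5 into bits 7:4; every other bit must survive */
	hnae3_set_field(reg, 0x00f0u, 4, 0x5u);
	assert(hnae3_get_field(reg, 0x00f0u, 4) == 0x5u);
	assert(reg == 0xffffff5fu);
	return 0;
}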
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index eb82700da7d0..ea5f8a84070d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hnae3.h"
#include "hns3_enet.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 25a73bb2e642..3554dca7a680 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
@@ -56,15 +50,16 @@ static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
-static irqreturn_t hns3_irq_handle(int irq, void *dev)
+static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
- struct hns3_enet_tqp_vector *tqp_vector = dev;
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
napi_schedule(&tqp_vector->napi);
@@ -239,7 +234,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
- int ret;
+ int i, ret;
+
+ if (kinfo->num_tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
+
+ netdev_set_tc_queue(netdev,
+ kinfo->tc_info[i].tc,
+ kinfo->tc_info[i].tqp_count,
+ kinfo->tc_info[i].tqp_offset);
+ }
+ }
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
@@ -312,7 +328,9 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
netif_carrier_off(netdev);
@@ -327,6 +345,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret;
}
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ netdev_set_prio_tc_map(netdev, i,
+ kinfo->prio_tc[i]);
+ }
+
priv->ae_handle->last_reset_time = jiffies;
return 0;
}
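
The prio-to-TC mapping added to hns3_nic_net_open() completes the mqprio wiring that hns3_nic_set_real_num_queue() now owns. The three netdev helpers compose as sketched below; the TC count and queue layout are made-up values, not the driver's:

#include <linux/netdevice.h>

static int sketch_map_tcs(struct net_device *ndev)
{
	int ret;

	ret = netdev_set_num_tc(ndev, 2);	/* declare two traffic classes */
	if (ret)
		return ret;

	netdev_set_tc_queue(ndev, 0, 4, 0);	/* tc0: queues 0..3 */
	netdev_set_tc_queue(ndev, 1, 4, 4);	/* tc1: queues 4..7 */

	netdev_set_prio_tc_map(ndev, 0, 0);	/* priority 0 -> tc0 */
	netdev_set_prio_tc_map(ndev, 7, 1);	/* priority 7 -> tc1 */
	return 0;
}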
@@ -493,8 +517,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* find the txbd field values */
*paylen = skb->len - hdr_len;
- hnae_set_bit(*type_cs_vlan_tso,
- HNS3_TXD_TSO_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso,
+ HNS3_TXD_TSO_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
@@ -586,21 +610,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute L2 header size for normal packet, defined in 2 Bytes */
l2_len = l3.hdr - skb->data;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
/* tunnel packet */
if (skb->encapsulation) {
/* compute OL2 header size, defined in 2 Bytes */
ol2_len = l2_len;
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, ol2_len >> 1);
/* compute OL3 header size, defined in 4 Bytes */
ol3_len = l4.hdr - l3.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, ol3_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, ol3_len >> 2);
/* MAC in UDP, MAC in GRE (0x6558) */
if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +633,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute OL4 header size, defined in 4 Bytes. */
ol4_len = l2_hdr - l4.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ ol4_len >> 2);
/* switch IP header ptr from outer to inner header */
l3.hdr = skb_inner_network_header(skb);
/* compute inner l2 header size, defined in 2 Bytes. */
l2_len = l3.hdr - l2_hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
} else {
/* skb packet types not supported by hardware,
* txbd len field is not filled.
@@ -634,22 +659,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, l3_len >> 2);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, l4.tcp->doff);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S, l4.tcp->doff);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
case IPPROTO_UDP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
default:
/* skb packet types not supported by hardware,
@@ -703,32 +730,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* define outer network header type. */
if (skb->protocol == htons(ETH_P_IP)) {
if (skb_is_gso(skb))
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
else
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
}
/* define tunnel type (OL4). */
switch (l4_proto) {
case IPPROTO_UDP:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
break;
case IPPROTO_GRE:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -749,43 +778,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
}
if (l3.v4->version == 4) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
if (skb_is_gso(skb))
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
} else if (l3.v6->version == 6) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
}
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -807,11 +836,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_S, 0);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+ HNS3_TXD_BDTYPE_S, 0);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +873,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
- hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -880,7 +909,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
- __be16 protocol;
u8 ol4_proto;
u8 il4_proto;
int ret;
@@ -909,7 +937,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
- protocol = skb->protocol;
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
@@ -1135,7 +1162,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
wmb(); /* Commit all data before submit */
- hnae_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, buf_num);
return NETDEV_TX_OK;
@@ -1304,7 +1331,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
bool if_running;
- unsigned int i;
int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1328,24 +1354,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
goto out;
- if (tc <= 1) {
- netdev_reset_tc(netdev);
- } else {
- ret = netdev_set_num_tc(netdev, tc);
- if (ret)
- goto out;
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!kinfo->tc_info[i].enable)
- continue;
-
- netdev_set_tc_queue(netdev,
- kinfo->tc_info[i].tc,
- kinfo->tc_info[i].tqp_count,
- kinfo->tc_info[i].tqp_offset);
- }
- }
-
ret = hns3_nic_set_real_num_queue(netdev);
out:
@@ -1665,6 +1673,9 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct pci_dev *pdev = h->pdev;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1698,12 +1709,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (pdev->revision != 0x20)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae_page_order(ring);
+ unsigned int order = hnae3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -1714,7 +1728,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae_page_size(ring);
+ cb->length = hnae3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -1780,33 +1794,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
hns3_free_buffers(ring);
- dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
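
Switching the descriptor ring from kzalloc() + dma_map_single(DMA_BIDIRECTIONAL) to dma_zalloc_coherent() trades a streaming mapping — which strictly requires dma_sync_*() calls around concurrent CPU/device access — for coherent memory that both sides may touch at any time. A sketch of the resulting alloc/free pair (helper names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int sketch_alloc_ring(struct device *dev, void **desc,
			     dma_addr_t *dma, size_t size)
{
	*desc = dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	return *desc ? 0 : -ENOMEM;
}

static void sketch_free_ring(struct device *dev, void *desc,
			     dma_addr_t dma, size_t size)
{
	if (desc)	/* mirrors the NULL check hns3_free_desc() now does */
		dma_free_coherent(dev, size, desc, dma);
}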
@@ -1887,7 +1895,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+ /* desc_cb will be cleaned after hnae3_free_buffer_detach */
hns3_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
@@ -1917,7 +1925,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
if (is_ring_empty(ring) || head == ring->next_to_clean)
return true; /* no data to poll */
- if (!is_valid_clean_head(ring, head)) {
+ if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
@@ -2016,15 +2024,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) &&
- hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+ hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
- truesize = hnae_buf_size(ring);
+ truesize = hnae3_buf_size(ring);
if (!twobufs)
- last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2076,13 +2084,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
/* check if hardware has done checksum */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
netdev_err(netdev, "L3/L4 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
@@ -2091,23 +2099,25 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
- l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
- HNS3_RXD_L4ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
- ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+ ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
+ /* fall through */
case HNS3_OL4_TYPE_NO_TUN:
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
- if (l3_type == HNS3_L3_TYPE_IPV4 ||
- (l3_type == HNS3_L3_TYPE_IPV6 &&
- (l4_type == HNS3_L4_TYPE_UDP ||
- l4_type == HNS3_L4_TYPE_TCP ||
- l4_type == HNS3_L4_TYPE_SCTP)))
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
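
Besides the rename, this hunk fixes the condition's grouping: the old form accepted any IPv4 packet regardless of L4 protocol, and only qualified IPv6 with the L4 check. The corrected predicate, isolated as a sketch (it assumes the HNS3_L3_TYPE_ and HNS3_L4_TYPE_ constants from hns3_enet.h):

#include <linux/types.h>

static bool sketch_rx_csum_unnecessary(u32 l3_type, u32 l4_type)
{
	return (l3_type == HNS3_L3_TYPE_IPV4 ||
		l3_type == HNS3_L3_TYPE_IPV6) &&
	       (l4_type == HNS3_L4_TYPE_UDP ||
		l4_type == HNS3_L4_TYPE_TCP ||
		l4_type == HNS3_L4_TYPE_SCTP);
}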
@@ -2135,8 +2145,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
- switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
- HNS3_RXD_STRP_TAGP_S)) {
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
break;
@@ -2174,7 +2184,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Check valid BD */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
return -EFAULT;
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2239,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2267,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
vlan_tag);
}
- if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely((!desc->rx.pkt_len) ||
- hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+ hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
netdev_err(netdev, "truncated pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.err_pkt_len++;
@@ -2279,7 +2289,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return -EFAULT;
}
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
netdev_err(netdev, "L2 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l2_err++;
@@ -2532,10 +2542,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
tx_ring = tqp_vector->tx_group.ring;
if (tx_ring) {
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2549,12 +2559,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2564,10 +2574,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
if (!tx_ring && rx_ring) {
cur_chain->next = NULL;
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2579,10 +2589,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
cur_chain = chain;
@@ -2745,10 +2755,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (ret)
return ret;
- ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
- if (ret)
- return ret;
-
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2809,7 +2815,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->io_base = q->io_base;
}
- hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
ring->tqp = q;
ring->desc = NULL;
@@ -2969,13 +2975,33 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
- hns3_buf_size2type(ring->buf_size));
hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
ring->desc_num / 8 - 1);
}
}
+static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ int i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
+ int j;
+
+ if (!tc_info->enable)
+ continue;
+
+ for (j = 0; j < tc_info->tqp_count; j++) {
+ struct hnae3_queue *q;
+
+ q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
+ tc_info->tc);
+ }
+ }
+}
+
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -3081,7 +3107,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->reset_level = HNAE3_NONE_RESET;
priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
@@ -3102,6 +3127,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ if (handle->flags & HNAE3_SUPPORT_VF)
+ handle->reset_level = HNAE3_VF_RESET;
+ else
+ handle->reset_level = HNAE3_FUNC_RESET;
+
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3208,7 +3238,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
struct net_device *ndev = kinfo->netdev;
bool if_running;
int ret;
- u8 i;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3218,10 +3247,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if_running = netif_running(ndev);
- ret = netdev_set_num_tc(ndev, tc);
- if (ret)
- return ret;
-
if (if_running) {
(void)hns3_nic_net_stop(ndev);
msleep(100);
@@ -3232,27 +3257,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (ret)
goto err_out;
- if (tc <= 1) {
- netdev_reset_tc(ndev);
- goto out;
- }
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
-
- if (tc_info->enable)
- netdev_set_tc_queue(ndev,
- tc_info->tc,
- tc_info->tqp_count,
- tc_info->tqp_offset);
- }
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(ndev, i,
- kinfo->prio_tc[i]);
- }
-
-out:
ret = hns3_nic_set_real_num_queue(ndev);
err_out:
@@ -3409,6 +3413,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
rx_ring->next_to_use = 0;
}
+ hns3_init_tx_ring_tc(priv);
+
return 0;
}
@@ -3418,7 +3424,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
struct net_device *ndev = kinfo->netdev;
if (!netif_running(ndev))
- return -EIO;
+ return 0;
return hns3_nic_net_stop(ndev);
}
@@ -3458,10 +3464,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
@@ -3493,10 +3495,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_put_ring_config(priv);
-
- priv->ring_data = NULL;
-
hns3_uninit_mac_addr(netdev);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 3b083d5ae9ce..a02a96aee2a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H
@@ -43,7 +37,7 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
-#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C
+#define HNS3_RING_TX_RING_TC_REG 0x00050
#define HNS3_RING_TX_RING_TAIL_REG 0x00058
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
@@ -499,7 +493,6 @@ struct hns3_enet_tqp_vector {
u16 num_tqps; /* total number of tqps in TQP vector */
- cpumask_t affinity_mask;
char name[HNAE3_INT_NAME_LEN];
/* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +584,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +594,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 40c0425b4023..f70ee6910ee2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/string.h>
@@ -59,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TPYE_NUM 1
+#define HNS3_SELF_TEST_TYPE_NUM 2
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -84,6 +78,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
return -EOPNOTSUPP;
switch (loop) {
+ case HNAE3_MAC_INTER_LOOP_SERDES:
case HNAE3_MAC_INTER_LOOP_MAC:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
@@ -201,7 +196,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
rx_group = &ring->tqp_vector->rx_group;
pre_rx_pkt = rx_group->total_packets;
+ preempt_disable();
hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+ preempt_enable();
rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
rx_group->total_packets = pre_rx_pkt;
@@ -291,7 +288,7 @@ static void hns3_self_test(struct net_device *ndev,
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int st_param[HNS3_SELF_TEST_TPYE_NUM][2];
+ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
bool if_running = netif_running(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
bool dis_vlan_filter;
@@ -307,6 +304,10 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
+ st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
+ st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
+
if (if_running)
dev_close(ndev);
@@ -320,7 +321,7 @@ static void hns3_self_test(struct net_device *ndev,
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
- for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) {
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index c36d64710fa6..ac13cb2b168e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -18,8 +12,7 @@
#include "hclge_main.h"
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,31 +39,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
+ int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
@@ -80,7 +66,7 @@ static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->flag = ring_type;
+ ring->ring_type = ring_type;
ring->dev = hdev;
ret = hclge_alloc_cmd_desc(ring);
@@ -111,8 +97,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -121,26 +105,26 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
- if (ring->flag == HCLGE_TYPE_CSQ) {
+ if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
}
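A worked editorial note on the address-split change above, assuming a 64-bit dma_addr_t; both forms compute the same halves, the helpers merely state the intent:
/* For dma = 0x000000123456789aULL:
 *   lower_32_bits(dma) == (u32)dma                == 0x3456789a
 *   upper_32_bits(dma) == (u32)((dma >> 31) >> 1) == 0x00000012
 * The old two-step shift avoided undefined behaviour when
 * dma_addr_t is only 32 bits wide (a plain >> 32 would be UB);
 * the helpers wrap the same trick under a clearer name.
 */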
@@ -152,33 +136,27 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclge_desc *desc;
- int clean = 0;
u32 head;
+ int clean;
- desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
rmb(); /* Make sure head is ready before touching any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- return 0;
- }
-
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ dev_warn(&hdev->pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
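A minimal editorial sketch of the new clean-count arithmetic in hclge_cmd_csq_clean(), assuming a wrapping ring of desc_num entries (csq_clean_count is a hypothetical helper, not in the driver):
/* e.g. next_to_clean = 1022, head = 2, desc_num = 1024:
 * clean = (2 - 1022 + 1024) % 1024 = 4 descriptors reclaimed,
 * i.e. entries 1022, 1023, 0 and 1 were consumed by firmware.
 */
static inline int csq_clean_count(int head, int next_to_clean, int desc_num)
{
	return (head - next_to_clean + desc_num) % desc_num;
}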
@@ -216,7 +194,7 @@ static bool hclge_is_special_opcode(u16 opcode)
**/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -227,7 +205,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclge_ring_space(&hw->cmq.csq)) {
+ if (num > hclge_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
*/
if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
- if (hclge_cmd_csq_done(hw))
+ if (hclge_cmd_csq_done(hw)) {
+ complete = true;
break;
+ }
udelay(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
}
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
+ if (!complete) {
+ retval = -EAGAIN;
+ } else {
handle = 0;
while (handle < num) {
/* Get the result of hardware write back */
desc_to_use = &hw->cmq.csq.desc[ntc];
desc[handle] = *desc_to_use;
- pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[handle].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
- if ((enum hclge_cmd_return_status)desc_ret ==
- HCLGE_CMD_EXEC_SUCCESS)
+ if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else
retval = -EIO;
- hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+ hw->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == hw->cmq.csq.desc_num)
@@ -290,15 +270,13 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
}
- if (!complete)
- retval = -EAGAIN;
-
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
- if (handle != num) {
+ if (handle < 0)
+ retval = handle;
+ else if (handle != num)
dev_warn(&hdev->pdev->dev,
"cleaned %d, need to clean %d\n", handle, num);
- }
spin_unlock_bh(&hw->cmq.csq.lock);
@@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
spin_lock_init(&hdev->hw.cmq.crq.lock);
hclge_cmd_init_regs(&hdev->hw);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d9aaa76c76eb..821d4c2f84bd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_CMD_H
#define __HCLGE_CMD_H
@@ -27,17 +21,10 @@ struct hclge_desc {
__le32 data[6];
};
-struct hclge_desc_cb {
- dma_addr_t dma;
- void *va;
- u32 length;
-};
-
struct hclge_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclge_desc *desc;
- struct hclge_desc_cb *desc_cb;
- struct hclge_dev *dev;
+ struct hclge_dev *dev;
u32 head;
u32 tail;
@@ -45,7 +32,7 @@ struct hclge_cmq_ring {
u16 desc_num;
int next_to_use;
int next_to_clean;
- u8 flag;
+ u8 ring_type; /* cmq ring type */
spinlock_t lock; /* Command queue lock */
};
@@ -71,26 +58,19 @@ struct hclge_misc_vector {
struct hclge_cmq {
struct hclge_cmq_ring csq;
struct hclge_cmq_ring crq;
- u16 tx_timeout; /* Tx timeout */
+ u16 tx_timeout;
enum hclge_cmd_status last_status;
};
-#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0
-#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HCLGE_CMD_FLAG_NEXT_SHIFT 2
-#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4
-#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT)
-#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT)
-#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT)
-#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT)
+#define HCLGE_CMD_FLAG_IN BIT(0)
+#define HCLGE_CMD_FLAG_OUT BIT(1)
+#define HCLGE_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_CMD_FLAG_WR BIT(3)
+#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
+#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
enum hclge_opcode_type {
- /* Generic command */
+ /* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
HCLGE_OPC_GBL_RST_STATUS = 0x0021,
@@ -106,18 +86,17 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- /* Device management command */
- /* MAC commond */
+ /* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- /* MACSEC command */
+ HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
- /* PFC/Pause CMD*/
+ /* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
HCLGE_OPC_CFG_MAC_PARA = 0x0703,
@@ -148,7 +127,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- /* Packet buffer allocate command */
+ /* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
@@ -156,11 +135,10 @@ enum hclge_opcode_type {
HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
- /* PTP command */
/* TQP management command */
HCLGE_OPC_SET_TQP_MAP = 0x0A01,
- /* TQP command */
+ /* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
@@ -172,10 +150,10 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
- /* TSO cmd */
+ /* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- /* RSS cmd */
+ /* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGE_OPC_RSS_TC_MODE = 0x0D08,
@@ -184,15 +162,15 @@ enum hclge_opcode_type {
/* Promiscuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
- /* Vlan offload command */
+ /* Vlan offload commands */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
- /* Interrupts cmd */
+ /* Interrupts commands */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
- /* MAC command */
+ /* MAC commands */
HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
@@ -201,13 +179,13 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
- /* Multicast linear table cmd */
+ /* Multicast linear table commands */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
- /* VLAN command */
+ /* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
@@ -215,7 +193,7 @@ enum hclge_opcode_type {
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
- /* QCN command */
+ /* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
@@ -225,7 +203,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
- /* Mailbox cmd */
+ /* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
/* Led command */
@@ -381,8 +359,10 @@ struct hclge_pf_res_cmd {
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
__le16 msixcap_localid_ba_rocee;
+#define HCLGE_MSIX_OFT_ROCEE_S 0
+#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
#define HCLGE_PF_VEC_NUM_S 0
-#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S)
+#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
__le16 pf_own_fun_number;
__le32 rsv[3];
@@ -471,8 +451,8 @@ struct hclge_rss_tc_mode_cmd {
u8 rsv[8];
};
-#define HCLGE_LINK_STS_B 0
-#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B)
+#define HCLGE_LINK_STATUS_UP_B 0
+#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
struct hclge_link_status_cmd {
u8 status;
u8 rsv[23];
@@ -571,7 +551,8 @@ struct hclge_config_auto_neg_cmd {
struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size;
- u8 rsv[22];
+ u8 min_frm_size;
+ u8 rsv[21];
};
enum hclge_mac_vlan_tbl_opcode {
@@ -581,13 +562,13 @@ enum hclge_mac_vlan_tbl_opcode {
HCLGE_MAC_VLAN_LKUP, /* Look up an entry through the mac_vlan key */
};
-#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0
-#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1
-#define HCLGE_MAC_EPORT_SW_EN_B 0xc
-#define HCLGE_MAC_EPORT_TYPE_B 0xb
-#define HCLGE_MAC_EPORT_VFID_S 0x3
+#define HCLGE_MAC_VLAN_BIT0_EN_B 0
+#define HCLGE_MAC_VLAN_BIT1_EN_B 1
+#define HCLGE_MAC_EPORT_SW_EN_B 12
+#define HCLGE_MAC_EPORT_TYPE_B 11
+#define HCLGE_MAC_EPORT_VFID_S 3
#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
-#define HCLGE_MAC_EPORT_PFID_S 0x0
+#define HCLGE_MAC_EPORT_PFID_S 0
#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
struct hclge_mac_vlan_tbl_entry_cmd {
u8 flags;
@@ -603,7 +584,7 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0x0
+#define HCLGE_VLAN_MASK_EN_B 0
struct hclge_mac_vlan_mask_entry_cmd {
u8 rsv0[2];
u8 vlan_mask;
@@ -634,23 +615,23 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
+#define HCLGE_CFG_MTA_MAC_SEL_S 0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 0x7
+#define HCLGE_CFG_MTA_MAC_EN_B 7
struct hclge_mta_filter_mode_cmd {
u8 dmac_sel_en; /* Use lowest 2 bits as sel_mode, bit 7 as enable */
u8 rsv[23];
};
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0
+#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
struct hclge_cfg_func_mta_filter_cmd {
u8 accept; /* Only the lowest bit is used */
u8 function_id;
u8 rsv[22];
};
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0
+#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
+#define HCLGE_CFG_MTA_ITEM_IDX_S 0
#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
struct hclge_cfg_func_mta_item_cmd {
__le16 item_idx; /* Only the lowest 12 bits are used */
@@ -795,6 +776,17 @@ struct hclge_reset_cmd {
u8 fun_reset_vfid;
u8 rsv[22];
};
+
+#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_DONE_B BIT(0)
+#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
+struct hclge_serdes_lb_cmd {
+ u8 mask;
+ u8 enable;
+ u8 result;
+ u8 rsv[21];
+};
+
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 955f0e3d5c95..f08ebb7caaaf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
#include "hclge_tm.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
index 7d808ee96694..278f21e02736 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_DCB_H__
#define __HCLGE_DCB_H__
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d318d35e598f..8577dfc799ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/acpi.h>
#include <linux/device.h>
@@ -793,9 +787,10 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
count += 1;
handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
- } else {
- count = -EOPNOTSUPP;
}
+
+ count++;
+ handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
ARRAY_SIZE(g_all_32bit_stats_string) +
@@ -938,18 +933,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->num_msi = hdev->num_roce_msi +
+ hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
}
return 0;
@@ -1038,38 +1037,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
- cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_VMDQ_M,
- HCLGE_CFG_VMDQ_S);
- cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
- cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TQP_DESC_N_M,
- HCLGE_CFG_TQP_DESC_N_S);
-
- cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_PHY_ADDR_M,
- HCLGE_CFG_PHY_ADDR_S);
- cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_MEDIA_TP_M,
- HCLGE_CFG_MEDIA_TP_S);
- cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_RX_BUF_LEN_M,
- HCLGE_CFG_RX_BUF_LEN_S);
+ cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_VMDQ_M,
+ HCLGE_CFG_VMDQ_S);
+ cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TQP_DESC_N_M,
+ HCLGE_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_PHY_ADDR_M,
+ HCLGE_CFG_PHY_ADDR_S);
+ cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_MEDIA_TP_M,
+ HCLGE_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_RX_BUF_LEN_M,
+ HCLGE_CFG_RX_BUF_LEN_S);
/* get mac_address */
mac_addr_tmp = __le32_to_cpu(req->param[2]);
- mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_MAC_ADDR_H_M,
- HCLGE_CFG_MAC_ADDR_H_S);
+ mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_MAC_ADDR_H_M,
+ HCLGE_CFG_MAC_ADDR_H_S);
mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
- cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_DEFAULT_SPEED_M,
- HCLGE_CFG_DEFAULT_SPEED_S);
- cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_RSS_SIZE_M,
- HCLGE_CFG_RSS_SIZE_S);
+ cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_DEFAULT_SPEED_M,
+ HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1076,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
- cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_SPEED_ABILITY_M,
- HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_M,
+ HCLGE_CFG_SPEED_ABILITY_S);
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1098,22 +1097,22 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
- hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
- HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+ hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+ HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* Len is in units of 4 bytes when sent to hardware */
- hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
- HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+ hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+ HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
req->offset = cpu_to_le32(offset);
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "get config failed %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
return ret;
}
hclge_parse_cfg(hcfg, desc);
+
return 0;
}
@@ -1130,13 +1129,10 @@ static int hclge_get_cap(struct hclge_dev *hdev)
/* get pf resource */
ret = hclge_query_pf_resource(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "query pf resource error %d.\n", ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
- return 0;
+ return ret;
}
static int hclge_configure(struct hclge_dev *hdev)
@@ -1189,7 +1185,7 @@ static int hclge_configure(struct hclge_dev *hdev)
/* Currently non-contiguous TCs are not supported */
for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae_set_bit(hdev->hw_tc_map, i, 1);
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
@@ -1208,13 +1204,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_min);
req->tso_mss_min = cpu_to_le16(tso_mss);
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_max);
req->tso_mss_max = cpu_to_le16(tso_mss);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1265,44 +1261,43 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
req->tqp_vid = cpu_to_le16(tqp_vid);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
- return 0;
+ return ret;
}
-static int hclge_assign_tqp(struct hclge_vport *vport,
- struct hnae3_queue **tqp, u16 num_tqps)
+static int hclge_assign_tqp(struct hclge_vport *vport)
{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
- alloced < num_tqps; i++) {
+ alloced < kinfo->num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
- tqp[alloced] = &hdev->htqp[i].q;
+ hdev->htqp[i].q.desc_num = kinfo->num_desc;
+ kinfo->tqp[alloced] = &hdev->htqp[i].q;
hdev->htqp[i].alloced = true;
alloced++;
}
}
- vport->alloc_tqps = num_tqps;
+ vport->alloc_tqps = kinfo->num_tqps;
return 0;
}
-static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
+static int hclge_knic_setup(struct hclge_vport *vport,
+ u16 num_tqps, u16 num_desc)
{
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
- kinfo->num_desc = hdev->num_desc;
+ kinfo->num_desc = num_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
kinfo->rss_size
@@ -1329,13 +1324,11 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
if (!kinfo->tqp)
return -ENOMEM;
- ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
- if (ret) {
+ ret = hclge_assign_tqp(vport);
+ if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
- return -EINVAL;
- }
- return 0;
+ return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
@@ -1397,7 +1390,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->numa_node_mask = hdev->numa_node_mask;
if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
- ret = hclge_knic_setup(vport, num_tqps);
+ ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
ret);
@@ -1487,13 +1480,11 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
@@ -1501,13 +1492,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
{
int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "tx buffer alloc failed %d\n", ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
- return 0;
+ return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
@@ -1825,17 +1813,13 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
(1 << HCLGE_TC0_PRI_BUF_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"rx private buffer alloc cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
@@ -1863,25 +1847,21 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->tc_wl[j].low =
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"rx private waterline config cmd failed %d\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
@@ -1911,24 +1891,20 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
req->com_thrd[j].high =
cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_thrd[j].low =
cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"common threshold config cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
@@ -1943,23 +1919,17 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
req = (struct hclge_rx_com_wl *)desc.data;
req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
- req->com_wl.high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
- req->com_wl.low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"common waterline config cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
@@ -2074,7 +2044,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
- HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -2118,48 +2088,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
- hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 6);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 6);
break;
case HCLGE_MAC_SPEED_100M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 7);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 7);
break;
case HCLGE_MAC_SPEED_1G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 0);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 0);
break;
case HCLGE_MAC_SPEED_10G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 1);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 1);
break;
case HCLGE_MAC_SPEED_25G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 2);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 2);
break;
case HCLGE_MAC_SPEED_40G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 3);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 3);
break;
case HCLGE_MAC_SPEED_50G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 4);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 4);
break;
case HCLGE_MAC_SPEED_100G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 5);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 5);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
}
- hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
- 1);
+ hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+ 1);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2201,18 +2171,16 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return ret;
}
- *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
+ *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+ speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+ HCLGE_QUERY_SPEED_S);
ret = hclge_parse_speed(speed_tmp, speed);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"could not parse speed(=%d), %d\n", speed_tmp, ret);
- return -EIO;
- }
- return 0;
+ return ret;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2225,17 +2193,15 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
- hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
@@ -2269,8 +2235,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
- hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
+ hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
ether_addr_copy(req->mac_mask, mac_mask);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2341,13 +2307,11 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mtu = ETH_DATA_LEN;
ret = hclge_set_mtu(handle, mtu);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"set mtu failed ret=%d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2386,7 +2350,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}
req = (struct hclge_link_status_cmd *)desc.data;
- link_status = req->status & HCLGE_LINK_STATUS;
+ link_status = req->status & HCLGE_LINK_STATUS_UP_M;
return !!link_status;
}
@@ -2505,7 +2469,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
u32 cmdq_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
/* Assumption: If by any chance reset and mailbox events are reported
@@ -2517,12 +2481,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
@@ -2614,6 +2580,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -2705,7 +2677,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
}
val = hclge_read_dev(&hdev->hw, reg);
- while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+ while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
val = hclge_read_dev(&hdev->hw, reg);
cnt++;
@@ -2727,8 +2699,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
req->fun_reset_vfid = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2747,13 +2718,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_CORE_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Core Reset requested\n");
break;
@@ -2810,8 +2781,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
break;
default:
- dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
- hdev->reset_type);
break;
}
@@ -2824,16 +2793,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
- /* perform reset of the stack & ae device for a client */
+ struct hnae3_handle *handle;
+ /* perform reset of the stack & ae device for a client */
+ handle = &hdev->vport[0].nic;
+ rtnl_lock();
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- rtnl_unlock();
hclge_clear_reset_cause(hdev);
} else {
@@ -2843,6 +2813,8 @@ static void hclge_reset(struct hclge_dev *hdev)
}
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ handle->last_reset_time = jiffies;
+ rtnl_unlock();
}
static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2855,8 +2827,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
* know this if last reset request did not occur very recently (watchdog
* timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ)
* In case of new request we reset the "reset level" to PF reset.
+ * And if it is a repeat of the most recent reset request, we
+ * throttle it: another reset is not allowed within 3*HZ
+ * (i.e. 3 seconds) of the previous one.
*/
- if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+ if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ return;
+ else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
handle->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
@@ -2868,8 +2845,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
if (handle->reset_level < HNAE3_GLOBAL_RESET)
handle->reset_level++;
-
- handle->last_reset_time = jiffies;
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -3110,23 +3085,21 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u16 mode = 0;
- hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+ HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+ HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss tc mode fail, status = %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
@@ -3149,13 +3122,10 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss input fail, status = %d\n", ret);
- return ret;
- }
-
- return 0;
+ return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
@@ -3491,16 +3461,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
i = 0;
for (node = ring_chain; node; node = node->next) {
tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S));
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3603,12 +3573,11 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
- return ret;
- }
- return 0;
+
+ return ret;
}
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -3648,20 +3617,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3689,7 +3658,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -3704,6 +3673,55 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+{
+#define HCLGE_SERDES_RETRY_MS 10
+#define HCLGE_SERDES_RETRY_NUM 100
+ struct hclge_serdes_lb_cmd *req;
+ struct hclge_desc desc;
+ int ret, i = 0;
+
+ req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+
+ if (en) {
+ req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ } else {
+ req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback set fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ do {
+ msleep(HCLGE_SERDES_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback get, ret = %d\n", ret);
+ return ret;
+ }
+ } while (++i < HCLGE_SERDES_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_SERDES_DONE_B));
+
+ if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+ return -EBUSY;
+ } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
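The serdes helper above follows a bounded poll-for-DONE pattern; a generic, hypothetical sketch of that shape (helper name and callback are illustrative, not part of the driver):
/* Poll a DONE bit up to 'retries' times, sleeping between reads;
 * mirrors the msleep()/re-read loop in hclge_set_serdes_loopback().
 */
static int poll_done_bit(u8 (*read_result)(void *ctx), void *ctx,
			 int retries, int delay_ms)
{
	int i;

	for (i = 0; i < retries; i++) {
		msleep(delay_ms);
		if (read_result(ctx) & BIT(0))	/* DONE bit */
			return 0;
	}
	return -EBUSY;	/* hardware never reported completion */
}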
static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
@@ -3715,6 +3733,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_MAC_INTER_LOOP_MAC:
ret = hclge_set_mac_loopback(hdev, en);
break;
+ case HNAE3_MAC_INTER_LOOP_SERDES:
+ ret = hclge_set_serdes_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -3763,7 +3784,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int i;
for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, true);
@@ -3777,9 +3798,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- ret = hclge_mac_start_phy(hdev);
- if (ret)
- return ret;
+ hclge_mac_start_phy(hdev);
return 0;
}
@@ -3911,7 +3930,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
#define HCLGE_FUNC_NUMBER_PER_DESC 6
int i, j;
- for (i = 0; i < HCLGE_DESC_NUMBER; i++)
+ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
if (desc[i].data[j])
return false;
@@ -3953,20 +3972,18 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
- hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
+ hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+ enable);
+ hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+ HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Config mat filter mode failed for cmd_send, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
@@ -3980,19 +3997,17 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+ enable);
req->function_id = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Config func_id enable failed for cmd_send, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
@@ -4007,10 +4022,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
- hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
+ hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+ HCLGE_CFG_MTA_ITEM_IDX_S, idx);
req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4257,17 +4272,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
- HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
- HCLGE_MAC_EPORT_PFID_S, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
req.egress_port = cpu_to_le16(egress_port);
@@ -4318,8 +4326,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
@@ -4331,7 +4339,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_add_mc_addr_common(vport, addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -4351,10 +4359,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4418,10 +4426,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4604,13 +4612,11 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req->vlan_fe = filter_en;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
#define HCLGE_FILTER_TYPE_VF 0
@@ -4802,19 +4808,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
- vcfg->accept_tag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
- vcfg->accept_untag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
- vcfg->accept_tag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
- vcfg->accept_untag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
- vcfg->insert_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
- vcfg->insert_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4840,14 +4846,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
- vcfg->strip_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
- vcfg->strip_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
- vcfg->vlan1_vlan_prionly ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
- vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4999,16 +5005,15 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->min_frm_size = HCLGE_MAC_MIN_FRAME;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- return ret;
- }
-
- hdev->mps = max_frm_size;
+ else
+ hdev->mps = max_frm_size;
- return 0;
+ return ret;
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -5043,7 +5048,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
- hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -5073,7 +5078,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5380,12 +5385,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
- mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
- HCLGE_PHY_MDIX_CTRL_S);
+ mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+ HCLGE_PHY_MDIX_CTRL_S);
retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
- mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
- is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+ mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+ is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
@@ -5412,6 +5417,16 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
+static int hclge_init_instance_hw(struct hclge_dev *hdev)
+{
+ return hclge_mac_connect_phy(hdev);
+}
+
+static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
+{
+ hclge_mac_disconnect_phy(hdev);
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -5431,6 +5446,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (ret)
return ret;
+ ret = hclge_init_instance_hw(hdev);
+ if (ret) {
+ client->ops->uninit_instance(&vport->nic,
+ 0);
+ return ret;
+ }
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -5493,6 +5515,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (client->ops->uninit_instance) {
+ hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -5531,7 +5554,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->back = hdev;
hw->io_base = pcim_iomap(pdev, 2, 0);
if (!hw->io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
@@ -5562,6 +5584,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pci_disable_device(pdev);
}
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -5577,8 +5623,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
- hdev->reset_request = 0;
- hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -5702,12 +5746,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
- set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ hclge_state_init(hdev);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -5812,16 +5851,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
+ hclge_state_uninit(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
@@ -5905,9 +5935,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
u32 *rss_indir;
int ret, i;
+ /* Free old tqps, then reallocate with the new tqp number during nic setup */
hclge_release_tqp(vport);
- ret = hclge_knic_setup(vport, new_tqps_num);
+ ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
return ret;
@@ -6149,8 +6180,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
- hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
- HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+ hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -6280,7 +6311,6 @@ static const struct hnae3_ae_ops hclge_ops = {
static struct hnae3_ae_algo ae_algo = {
.ops = &hclge_ops,
- .name = HCLGE_NAME,
.pdev_id_table = ae_algo_pci_tbl,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7488534528cd..1528fb3fa6be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
@@ -22,8 +16,6 @@
#define HCLGE_INVALID_VPORT 0xffff
-#define HCLGE_ROCE_VECTOR_OFFSET 96
-
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
@@ -40,7 +32,7 @@
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK 0xf
+#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
@@ -77,11 +69,11 @@
/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17
-#define HCLGE_PHY_MDIX_CTRL_S (5)
+#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
-#define HCLGE_PHY_MDIX_STATUS_B (6)
-#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+#define HCLGE_PHY_MDIX_STATUS_B 6
+#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
@@ -89,9 +81,10 @@
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
-#define HCLGE_GLOBAL_RESET_BIT 0x0
-#define HCLGE_CORE_RESET_BIT 0x1
+#define HCLGE_GLOBAL_RESET_BIT 0
+#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -128,6 +121,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
+ HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_MAX
};
@@ -138,12 +132,6 @@ enum hclge_evt_cause {
};
#define HCLGE_MPF_ENBALE 1
-struct hclge_caps {
- u16 num_tqp;
- u16 num_buffer_cell;
- u32 flag;
- u16 vmdq;
-};
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
@@ -189,8 +177,6 @@ struct hclge_hw {
struct hclge_mac mac;
int num_vec;
struct hclge_cmq cmq;
- struct hclge_caps caps;
- void *back;
};
/* TQP stats */
@@ -202,7 +188,10 @@ struct hlcge_tqp_stats {
};
struct hclge_tqp {
- struct device *dev; /* Device for DMA mapping */
+ /* copy of the device pointer from pci_dev,
+ * used when performing DMA mapping
+ */
+ struct device *dev;
struct hnae3_queue q;
struct hlcge_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
@@ -492,13 +481,11 @@ struct hclge_dev {
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
- /* Base task tqp physical id of this PF */
- u16 base_tqp_pid;
+ u16 base_tqp_pid; /* Base task tqp physical id of this PF */
u16 alloc_rss_size; /* Allocated RSS task queue */
u16 rss_size_max; /* HW defined max RSS task queue */
- /* Num of guaranteed filters for this PF */
- u16 fdir_pf_filter_count;
+ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
u32 numa_node_mask;
u16 rx_buf_len;
@@ -520,6 +507,7 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 roce_base_msix_offset;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
@@ -560,7 +548,7 @@ struct hclge_dev {
u32 mps; /* Max packet size */
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Mutilcast filter enable */
+ bool enable_mta; /* Multicast filter enable */
struct hclge_vlan_type_cfg vlan_type_cfg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7541cb9b71ce..f34851c91eb3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
}
}
-/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
* msg[0]: opcode
* msg[1]: <not relevant to this function>
* msg[2]: ring_num
* msg[3]: first ring type (TX|RX)
* msg[4]: first tqp id
- * msg[5] ~ msg[14]: other ring type and tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
*/
static int hclge_get_ring_chain_from_mbx(
struct hclge_mbx_vf_to_pf_cmd *req,
@@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx(
HCLGE_MBX_RING_NODE_VARIABLE_NUM))
return -ENOMEM;
- hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
- hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[5]);
+ hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[5]);
cur_chain = ring_chain;
@@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx(
if (!new_chain)
goto err;
- hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
- hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
cur_chain->next = new_chain;
cur_chain = new_chain;
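The layout comment above fully determines how a ring chain is recovered from the mailbox: msg[2] carries the ring count, and (ring type, tqp id, int_gl idx) triples start at msg[3]. A hedged standalone sketch of that parse, assuming a stride of 3 per node and that type 0 means TX (both are assumptions restated here, not guaranteed by this hunk alone):

#include <stdint.h>
#include <stdio.h>

static void parse_ring_chain(const uint8_t *msg)
{
	unsigned int ring_num = msg[2];	/* per the layout comment above */
	unsigned int i;

	for (i = 0; i < ring_num; i++) {
		/* triples of (type, tqp id, int_gl idx) from msg[3] on */
		const uint8_t *node = &msg[3 + 3 * i];

		printf("ring %u: type=%s tqp_id=%u int_gl_idx=%u\n",
		       i, node[0] ? "RX" : "TX", node[1], node[2]);
	}
}

int main(void)
{
	/* opcode, <unused>, ring_num = 2, then two (type, tqp, gl) triples */
	uint8_t msg[16] = { 0x10, 0, 2,  0, 5, 1,  1, 5, 2 };

	parse_ring_chain(msg);
	return 0;
}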
@@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9f7932e423b5..398971a062f4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/kernel.h>
@@ -67,16 +61,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
mdio_cmd->data_wr = cpu_to_le16(data);
@@ -105,16 +99,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
/* Read out phy data */
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -125,7 +119,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return ret;
}
- if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
dev_err(&hdev->pdev->dev, "mdio read data error\n");
return -EIO;
}
@@ -199,7 +193,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
netdev_err(netdev, "failed to configure flow control.\n");
}
-int hclge_mac_start_phy(struct hclge_dev *hdev)
+int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -208,6 +202,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return 0;
+ phydev->supported &= ~SUPPORTED_FIBRE;
+
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
@@ -219,11 +215,29 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
phydev->advertising = phydev->supported;
- phy_start(phydev);
-
return 0;
}
+void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_disconnect(phydev);
+}
+
+void hclge_mac_start_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_start(phydev);
+}
+
void hclge_mac_stop_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
@@ -233,5 +247,4 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
return;
phy_stop(phydev);
- phy_disconnect(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index c5e91cfb8f2c..5fbf7dddb5d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -1,17 +1,13 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
int hclge_mac_mdio_config(struct hclge_dev *hdev);
-int hclge_mac_start_phy(struct hclge_dev *hdev);
+int hclge_mac_connect_phy(struct hclge_dev *hdev);
+void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
+void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 262c125f8137..5db70a1451c5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
@@ -1184,10 +1178,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
- sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
- HCLGE_BP_SUB_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
@@ -1223,6 +1217,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
tx_en = true;
rx_en = true;
break;
+ case HCLGE_FC_PFC:
+ tx_en = false;
+ rx_en = false;
+ break;
default:
tx_en = true;
rx_en = true;
@@ -1240,8 +1238,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index c2b6e8a6700f..dd4c194747c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_TM_H
#define __HCLGE_TM_H
@@ -123,10 +117,11 @@ struct hclge_port_shapping_cmd {
};
#define hclge_tm_set_field(dest, string, val) \
- hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH), val)
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 1bbfe131b596..fb471fe2c494 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- hclgevf_ring_to_dma_dir(ring));
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 621c6cbacf76..19b32860309c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -82,6 +82,7 @@ struct hclgevf_cmq {
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -134,6 +135,19 @@ struct hclgevf_query_version_cmd {
__le32 firmware_rsv[5];
};
+#define HCLGEVF_MSIX_OFT_ROCEE_S 0
+#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
+#define HCLGEVF_VEC_NUM_S 0
+#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
+struct hclgevf_query_res_cmd {
+ __le16 tqp_num;
+ __le16 reserved;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+ __le16 vf_intr_vector_number;
+ __le16 rsv[7];
+};
+
#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a17872aab168..9c0091f2addf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle)
}
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
- int vector,
+ int vector_id,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_ring_chain_node *node;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
- int i = 0, vector_id;
+ int i = 0;
int status;
u8 type;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- vector_id = hclgevf_get_vector_index(hdev, vector);
- if (vector_id < 0) {
- dev_err(&handle->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
- return vector_id;
- }
for (node = ring_chain; node; node = node->next) {
int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
}
req->msg[idx_offset] =
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+ req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
@@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
- return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}
static int hclgevf_unmap_ring_from_vector(
@@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector(
return vector_id;
}
- ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vector=%d, ret =%d\n",
@@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector(
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
- hclgevf_free_vector(hdev, vector);
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "hclgevf_put_vector get vector index fail. ret =%d\n",
+ vector_id);
+ return vector_id;
+ }
+
+ hclgevf_free_vector(hdev, vector_id);
return 0;
}
@@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
+ while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+ (cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++;
@@ -1351,14 +1370,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
struct hnae3_handle *roce = &hdev->roce;
struct hnae3_handle *nic = &hdev->nic;
- roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+ roce->rinfo.num_vectors = hdev->num_roce_msix;
if (hdev->num_msi_left < roce->rinfo.num_vectors ||
hdev->num_msi_left == 0)
return -EINVAL;
- roce->rinfo.base_vector =
- hdev->vector_status[hdev->num_msi_used];
+ roce->rinfo.base_vector = hdev->roce_base_vector;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
@@ -1501,10 +1519,15 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
- hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
- PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
"failed(%d) to allocate MSI/MSI-X vectors\n",
@@ -1519,6 +1542,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -1582,9 +1606,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
-static int hclgevf_init_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
switch (client->type) {
@@ -1635,9 +1660,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return 0;
}
-static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
/* un-init roce, if it exists */
if (hdev->roce_client)
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
@@ -1648,22 +1675,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
client->ops->uninit_instance(&hdev->nic, 0);
}
-static int hclgevf_register_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- return hclgevf_init_instance(hdev, client);
-}
-
-static void hclgevf_unregister_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- hclgevf_uninit_instance(hdev, client);
-}
-
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1727,6 +1738,45 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_disable_device(pdev);
}
+static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_query_res_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query vf resource failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+ HCLGEVF_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msix =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ /* The VF should have both NIC vectors and Roce vectors, with the
+ * NIC vectors queued before the Roce vectors. The offset is fixed to 64.
+ */
+ hdev->num_msi = hdev->num_roce_msix +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+ }
+
+ return 0;
+}
+
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
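hclgevf_query_vf_resource() above derives the VF's whole MSI-X budget from two little-endian firmware fields, using the shifted-mask convention of hnae3_get_field(). A standalone sketch of that arithmetic with sample values; the masks mirror HCLGEVF_MSIX_OFT_ROCEE_M and HCLGEVF_VEC_NUM_M from this patch, but the numbers are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Shifted-mask convention: the mask is already in position. */
#define MSIX_OFT_ROCEE_S 0
#define MSIX_OFT_ROCEE_M (0xffffu << MSIX_OFT_ROCEE_S)
#define VEC_NUM_S 0
#define VEC_NUM_M (0xffu << VEC_NUM_S)

#define get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	uint16_t ba_rocee = 64;		/* sample: Roce vectors start at offset 64 */
	uint16_t intr_vec_num = 4;	/* sample: 4 Roce vectors granted */
	unsigned int roce_base = get_field(ba_rocee, MSIX_OFT_ROCEE_M,
					   MSIX_OFT_ROCEE_S);
	unsigned int num_roce = get_field(intr_vec_num, VEC_NUM_M, VEC_NUM_S);
	/* NIC vectors sit below the Roce block, so the total budget is
	 * the Roce offset plus the Roce vector count.
	 */
	unsigned int num_msi = roce_base + num_roce;

	printf("roce offset %u, roce vectors %u, total msi %u\n",
	       roce_base, num_roce, num_msi);
	return 0;
}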
@@ -1744,18 +1794,26 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ /* Get vf resource */
+ ret = hclgevf_query_vf_resource(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query vf resource error, ret = %d.\n", ret);
+ goto err_query_vf;
+ }
+
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_irq_init;
+ goto err_query_vf;
}
hclgevf_state_init(hdev);
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1811,11 +1869,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
err_config:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_irq_init:
+err_query_vf:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_pci_uninit(hdev);
return ret;
}
@@ -1924,8 +1982,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
- .init_client_instance = hclgevf_register_client,
- .uninit_client_instance = hclgevf_unregister_client,
+ .init_client_instance = hclgevf_init_client_instance,
+ .uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
@@ -1962,7 +2020,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
static struct hnae3_ae_algo ae_algovf = {
.ops = &hclgevf_ops,
- .name = HCLGEVF_NAME,
.pdev_id_table = ae_algovf_pci_tbl,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 0656e8e5c5f0..b23ba171473c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -12,7 +12,6 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
-#define HCLGEVF_ROCEE_VECTOR_NUM 0
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
@@ -150,6 +149,9 @@ struct hclgevf_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ int roce_base_vector;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index b598c06af8e0..e9d5a4f96304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE);
+ memcpy(&msg_q[0], req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
hdev->arq.count++;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 79b567447084..6b19607a4caa 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- struct hinic_pfhwdev *pfhwdev;
u16 out_size;
int err;
@@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
&fw_ctxt, sizeof(fw_ctxt),
&fw_ctxt, &out_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index b9db6d649743..cb239627770f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -635,17 +635,18 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
}
/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * hinic_sq_read_wqebb - read the wqe ptr in the current ci and update the ci;
+ * the wqe only has one wqebb
* @sq: send queue
* @skb: return skb that was saved
- * @wqe_size: the size of the wqe
+ * @wqe_size: the wqe size ptr
* @cons_idx: consumer index of the wqe
*
* Return wqe in ci position
**/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx)
{
struct hinic_hw_wqe *hw_wqe;
struct hinic_sq_wqe *sq_wqe;
@@ -658,6 +659,8 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
if (IS_ERR(hw_wqe))
return NULL;
+ *skb = sq->saved_skb[*cons_idx];
+
sq_wqe = &hw_wqe->sq_wqe;
ctrl = &sq_wqe->ctrl;
ctrl_info = be32_to_cpu(ctrl->ctrl_info);
@@ -665,11 +668,28 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
*wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
*wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
+ *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
- *skb = sq->saved_skb[*cons_idx];
+ return &hw_wqe->sq_wqe;
+}
+
+/**
+ * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * @sq: send queue
+ * @skb: return skb that was saved
+ * @wqe_size: the size of the wqe
+ * @cons_idx: consumer index of the wqe
+ *
+ * Return wqe in ci position
+ **/
+struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int wqe_size, u16 *cons_idx)
+{
+ struct hinic_hw_wqe *hw_wqe;
- /* using the real wqe size to read wqe again */
- hw_wqe = hinic_read_wqe(sq->wq, *wqe_size, cons_idx);
+ hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
+ *skb = sq->saved_skb[*cons_idx];
return &hw_wqe->sq_wqe;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index df729a1587e9..6c84f83ec283 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -165,7 +165,11 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
+ unsigned int wqe_size, u16 *cons_idx);
+
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx);
void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 2353ec829c04..c5fca0356c9c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -283,7 +283,11 @@ static void free_all_tx_skbs(struct hinic_txq *txq)
int nr_sges;
u16 ci;
- while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
+ while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
+ if (!sq_wqe)
+ break;
+
nr_sges = skb_shinfo(skb)->nr_frags + 1;
hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
@@ -319,11 +323,21 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
do {
hw_ci = HW_CONS_IDX(sq) & wq->mask;
- sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci);
+ /* Read a WQEBB to get the real WQE size and consumer index. */
+ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
if ((!sq_wqe) ||
(((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
break;
+ /* If this WQE has multiple WQEBBs, read it again to get the
+ * full-size WQE.
+ */
+ if (wqe_size > wq->wqebb_size) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
+ if (unlikely(!sq_wqe))
+ break;
+ }
+
tx_bytes += skb->len;
pkts++;
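The free_tx_poll() change above splits WQE consumption into two reads: peek one fixed-size WQEBB to learn the real WQE size, and only re-read with the full size when the WQE spans several WQEBBs. A simplified standalone model of that two-step read; the ring shape, header layout, and sizes are invented for illustration.

#include <stdio.h>

#define WQEBB_SIZE 64
#define RING_DEPTH 8
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

struct wqe_hdr { unsigned int total_size; };

/* Simulated ring: each slot is one WQEBB; a WQE may span several slots. */
static unsigned char ring[RING_DEPTH][WQEBB_SIZE];

static unsigned int peek_wqe_size(unsigned int ci)
{
	/* Step 1: read a single WQEBB; its header gives the real size. */
	const struct wqe_hdr *hdr = (const struct wqe_hdr *)ring[ci];

	return ALIGN_UP(hdr->total_size, WQEBB_SIZE);
}

int main(void)
{
	unsigned int ci = 0;
	unsigned int size;

	((struct wqe_hdr *)ring[0])->total_size = 100; /* spans 2 WQEBBs */

	size = peek_wqe_size(ci);
	/* Step 2: only when the WQE is larger than one WQEBB is it read
	 * again with the full size before being consumed.
	 */
	if (size > WQEBB_SIZE)
		printf("re-read full WQE of %u bytes (%u WQEBBs)\n",
		       size, size / WQEBB_SIZE);
	ci = (ci + size / WQEBB_SIZE) % RING_DEPTH;
	printf("new ci = %u\n", ci);
	return 0;
}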
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ffe7acbeaa22..dafdd4ade705 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -718,23 +718,6 @@ static int init_tx_pools(struct net_device *netdev)
return 0;
}
-static void release_error_buffers(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
- list_del(&error_buff->list);
- dma_unmap_single(dev, error_buff->dma, error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
- }
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-}
-
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
int i;
@@ -896,7 +879,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_tx_pools(adapter);
release_rx_pools(adapter);
- release_error_buffers(adapter);
release_napi(adapter);
release_login_rsp_buffer(adapter);
}
@@ -3843,132 +3825,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
-static void handle_error_info_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp;
- unsigned long flags;
- bool found = false;
- int i;
-
- if (!crq->request_error_rsp.rc.code) {
- dev_info(dev, "Request Error Rsp returned with rc=%x\n",
- crq->request_error_rsp.rc.code);
- return;
- }
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
- if (error_buff->error_id == crq->request_error_rsp.error_id) {
- found = true;
- list_del(&error_buff->list);
- break;
- }
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- if (!found) {
- dev_err(dev, "Couldn't find error id %x\n",
- be32_to_cpu(crq->request_error_rsp.error_id));
- return;
- }
-
- dev_err(dev, "Detailed info for error id %x:",
- be32_to_cpu(crq->request_error_rsp.error_id));
-
- for (i = 0; i < error_buff->len; i++) {
- pr_cont("%02x", (int)error_buff->buff[i]);
- if (i % 8 == 7)
- pr_cont(" ");
- }
- pr_cont("\n");
-
- dma_unmap_single(dev, error_buff->dma, error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
-}
-
-static void request_error_information(struct ibmvnic_adapter *adapter,
- union ibmvnic_crq *err_crq)
-{
- struct device *dev = &adapter->vdev->dev;
- struct net_device *netdev = adapter->netdev;
- struct ibmvnic_error_buff *error_buff;
- unsigned long timeout = msecs_to_jiffies(30000);
- union ibmvnic_crq crq;
- unsigned long flags;
- int rc, detail_len;
-
- error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
- if (!error_buff)
- return;
-
- detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
- error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
- if (!error_buff->buff) {
- kfree(error_buff);
- return;
- }
-
- error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, error_buff->dma)) {
- netdev_err(netdev, "Couldn't map error buffer\n");
- kfree(error_buff->buff);
- kfree(error_buff);
- return;
- }
-
- error_buff->len = detail_len;
- error_buff->error_id = err_crq->error_indication.error_id;
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_add_tail(&error_buff->list, &adapter->errors);
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- memset(&crq, 0, sizeof(crq));
- crq.request_error_info.first = IBMVNIC_CRQ_CMD;
- crq.request_error_info.cmd = REQUEST_ERROR_INFO;
- crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
- crq.request_error_info.len = cpu_to_be32(detail_len);
- crq.request_error_info.error_id = err_crq->error_indication.error_id;
-
- rc = ibmvnic_send_crq(adapter, &crq);
- if (rc) {
- netdev_err(netdev, "failed to request error information\n");
- goto err_info_fail;
- }
-
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- netdev_err(netdev, "timeout waiting for error information\n");
- goto err_info_fail;
+static const char *ibmvnic_fw_err_cause(u16 cause)
+{
+ switch (cause) {
+ case ADAPTER_PROBLEM:
+ return "adapter problem";
+ case BUS_PROBLEM:
+ return "bus problem";
+ case FW_PROBLEM:
+ return "firmware problem";
+ case DD_PROBLEM:
+ return "device driver problem";
+ case EEH_RECOVERY:
+ return "EEH recovery";
+ case FW_UPDATED:
+ return "firmware updated";
+ case LOW_MEMORY:
+ return "low Memory";
+ default:
+ return "unknown";
}
-
- return;
-
-err_info_fail:
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_del(&error_buff->list);
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- kfree(error_buff->buff);
- kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
+ u16 cause;
- dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
- crq->error_indication.flags
- & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- be32_to_cpu(crq->error_indication.error_id),
- be16_to_cpu(crq->error_indication.error_cause));
+ cause = be16_to_cpu(crq->error_indication.error_cause);
- if (be32_to_cpu(crq->error_indication.error_id))
- request_error_information(adapter, crq);
+ dev_warn_ratelimited(dev,
+ "Firmware reports %serror, cause: %s. Starting recovery...\n",
+ crq->error_indication.flags
+ & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+ ibmvnic_fw_err_cause(cause));
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
@@ -4468,10 +4359,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Error Indication\n");
handle_error_indication(crq, adapter);
break;
- case REQUEST_ERROR_RSP:
- netdev_dbg(netdev, "Got Error Detail Response\n");
- handle_error_info_rsp(crq, adapter);
- break;
case REQUEST_STATISTICS_RSP:
netdev_dbg(netdev, "Got Statistics Response\n");
complete(&adapter->stats_done);
@@ -4830,9 +4717,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->stats_lock);
- INIT_LIST_HEAD(&adapter->errors);
- spin_lock_init(&adapter->error_list_lock);
-
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
mutex_init(&adapter->reset_lock);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f9fb780102ac..f06eec145ca6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -512,24 +512,6 @@ struct ibmvnic_error_indication {
u8 reserved2[2];
} __packed __aligned(8);
-struct ibmvnic_request_error_info {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 ioba;
- __be32 len;
- __be32 error_id;
-} __packed __aligned(8);
-
-struct ibmvnic_request_error_rsp {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 error_id;
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
struct ibmvnic_link_state_indication {
u8 first;
u8 cmd;
@@ -709,8 +691,6 @@ union ibmvnic_crq {
struct ibmvnic_request_debug_stats request_debug_stats;
struct ibmvnic_request_debug_stats request_debug_stats_rsp;
struct ibmvnic_error_indication error_indication;
- struct ibmvnic_request_error_info request_error_info;
- struct ibmvnic_request_error_rsp request_error_rsp;
struct ibmvnic_link_state_indication link_state_indication;
struct ibmvnic_change_mac_addr change_mac_addr;
struct ibmvnic_change_mac_addr change_mac_addr_rsp;
@@ -809,8 +789,6 @@ enum ibmvnic_commands {
SET_PHYS_PARMS = 0x07,
SET_PHYS_PARMS_RSP = 0x87,
ERROR_INDICATION = 0x08,
- REQUEST_ERROR_INFO = 0x09,
- REQUEST_ERROR_RSP = 0x89,
LOGICAL_LINK_STATE = 0x0C,
LOGICAL_LINK_STATE_RSP = 0x8C,
REQUEST_STATISTICS = 0x0D,
@@ -945,14 +923,6 @@ struct ibmvnic_rx_pool {
struct ibmvnic_long_term_buff long_term_buff;
};
-struct ibmvnic_error_buff {
- char *buff;
- dma_addr_t dma;
- int len;
- struct list_head list;
- __be32 error_id;
-};
-
struct ibmvnic_vpd {
unsigned char *buff;
dma_addr_t dma_addr;
@@ -1047,9 +1017,6 @@ struct ibmvnic_adapter {
struct completion init_done;
int init_done_rc;
- struct list_head errors;
- spinlock_t error_list_lock;
-
struct completion fw_done;
int fw_done_rc;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ddbea79d18e5..501ee718177f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -868,6 +868,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7d888e05f96f..80e3eec6134e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2247,6 +2247,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eb2d1530d331..85f75b5978fc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3541,6 +3541,41 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_rearrange_nvm
+ * @hw: pointer to the hw struct
+ * @rearrange_nvm: defines direction of rearrangement
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Rearrange NVM structure, available only for transition FW
+ **/
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_nvm_update *cmd;
+ i40e_status status;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
+
+ if (!rearrange_nvm) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_rearrange_nvm_exit;
+ }
+
+ cmd->command_flags |= rearrange_nvm;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+ return status;
+}
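A hedged usage sketch for the new wrapper: the caller picks exactly one direction flag, and anything outside the two REARRANGE bits is masked off before the command is sent. The surrounding firmware-update context (a valid hw pointer) is assumed and not part of this patch:

/* Ask transition firmware to rearrange the NVM to the flat layout. */
i40e_status status;

status = i40e_aq_rearrange_nvm(hw, I40E_AQ_NVM_REARRANGE_TO_FLAT, NULL);
if (status)
	return status;	/* I40E_ERR_PARAM if no direction bit was set,
			 * otherwise an admin-queue send failure.
			 */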
+
+/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 6947a2a571cb..abcd096ede14 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -7,6 +7,11 @@
#include "i40e_diag.h"
struct i40e_stats {
+ /* The stat_string is expected to be a format string formatted using
+ * vsnprintf by i40e_add_stat_strings. Every member of a stats array
+ * should use the same format specifiers as they will be formatted
+ * using the same variadic arguments.
+ */
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
@@ -26,6 +31,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_PFC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pfc_stats, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -56,6 +63,13 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
+static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
+ I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
+ I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
+ I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
+ I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+};
+
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -141,6 +155,22 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
};
+struct i40e_pfc_stats {
+ u64 priority_xon_rx;
+ u64 priority_xoff_rx;
+ u64 priority_xon_tx;
+ u64 priority_xoff_tx;
+ u64 priority_xon_2_xoff;
+};
+
+static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
+ I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
+ I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
+};
+
/* We use num_tx_queues here as a proxy for the maximum number of queues
* available because we always allocate queues symmetrically.
*/
@@ -155,23 +185,17 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
-#define I40E_PFC_STATS_LEN ( \
- (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
- / sizeof(u64))
-#define I40E_VEB_TC_STATS_LEN ( \
- (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
- / sizeof(u64))
-#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
-#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
+
+#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
+ I40E_MAX_USER_PRIORITY)
+
+#define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \
+ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
+ I40E_MAX_TRAFFIC_CLASS))
+
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
+ I40E_VEB_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
enum i40e_ethtool_test_id {
@@ -1565,7 +1589,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring;
u16 unused;
/* clone ring and setup updated count */
@@ -1589,9 +1612,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
*/
- ring = &rx_rings[i];
- unused = I40E_DESC_UNUSED(ring);
- err = i40e_alloc_rx_buffers(ring, unused);
+ unused = I40E_DESC_UNUSED(&rx_rings[i]);
+ err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
rx_unwind:
if (err) {
do {
@@ -1681,7 +1703,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
- return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
+ return I40E_PF_STATS_LEN(netdev);
else
return I40E_VSI_STATS_LEN(netdev);
}
@@ -1706,6 +1728,114 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
/**
+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats.
+ * If the pointer is null, data will be zero'd.
+ */
+static inline void
+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
+ const struct i40e_stats *stat)
+{
+ char *p;
+
+ if (!pointer) {
+ /* ensure that the ethtool data buffer is zero'd for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pointer + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static inline void
+__i40e_add_ethtool_stats(u64 **data, void *pointer,
+ const struct i40e_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ * Assumes that stats is an array.
+ **/
+#define i40e_add_ethtool_stats(data, pointer, stats) \
+ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
+ * @pf: the PF device structure
+ * @i: the priority value to copy
+ *
+ * The PFC stats are found as arrays in pf->stats, which is not easy to pass
+ * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
+ * of the PFC stats for the given priority.
+ **/
+static inline struct i40e_pfc_stats
+i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
+{
+#define I40E_GET_PFC_STAT(stat, priority) \
+ .stat = pf->stats.stat[priority]
+
+ struct i40e_pfc_stats pfc = {
+ I40E_GET_PFC_STAT(priority_xon_rx, i),
+ I40E_GET_PFC_STAT(priority_xoff_rx, i),
+ I40E_GET_PFC_STAT(priority_xon_tx, i),
+ I40E_GET_PFC_STAT(priority_xoff_tx, i),
+ I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
+ };
+ return pfc;
+}
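To make the designated-initializer trick concrete: for a given priority i, each I40E_GET_PFC_STAT() use expands into one member assignment, so the returned structure is a per-priority snapshot of the PF's array-based stats:

/* Within the initializer list above,
 *   I40E_GET_PFC_STAT(priority_xon_rx, i)
 * expands to
 *   .priority_xon_rx = pf->stats.priority_xon_rx[i]
 */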
+
+/**
* i40e_get_ethtool_stats - copy stat values into supplied buffer
* @netdev: the netdev to collect stats for
* @stats: ethtool stats command structure
@@ -1726,23 +1856,19 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
unsigned int i;
- char *p;
- struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start;
+ bool veb_stats;
+ u64 *p = data;
i40e_update_stats(vsi);
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
+ i40e_gstrings_net_stats);
+
+ i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
+
rcu_read_lock();
for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
tx_ring = READ_ONCE(vsi->tx_rings[i]);
@@ -1777,45 +1903,72 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+
+ /* If veb stats aren't enabled, pass NULL instead of the veb so that
+ * we initialize stats to zero and update the data pointer
+ * intelligently
+ */
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_stats);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_tc_stats);
+
+ i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- p = (char *)veb;
- p += i40e_gstrings_veb_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- *(data++) = veb->tc_stats.tc_tx_packets[i];
- *(data++) = veb->tc_stats.tc_tx_bytes[i];
- *(data++) = veb->tc_stats.tc_rx_packets[i];
- *(data++) = veb->tc_stats.tc_rx_bytes[i];
- }
- } else {
- data += I40E_VEB_STATS_TOTAL;
- }
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- p = (char *)pf + i40e_gstrings_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- *(data++) = pf->stats.priority_xon_tx[i];
- *(data++) = pf->stats.priority_xoff_tx[i];
+ struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
+
+ i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
}
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- *(data++) = pf->stats.priority_xon_rx[i];
- *(data++) = pf->stats.priority_xoff_rx[i];
+
+check_data_pointer:
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev),
+ "ethtool stats count mismatch!");
+}
+
+/**
+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+ const unsigned int size, ...)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+ *p += ETH_GSTRING_LEN;
+ va_end(args);
}
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
- *(data++) = pf->stats.priority_xon_2_xoff[i];
}
/**
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p. Assumes that stats is an array on which
+ * ARRAY_SIZE can be called.
+ **/
+#define i40e_add_stat_strings(p, stats, ...) \
+ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
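For illustration, an index passed through the varargs plumbing is substituted into every %u of the stat_string templates; e.g. i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, 2) emits the four strings below, each written into its own ETH_GSTRING_LEN slot by vsnprintf:

/* "veb.tc_2_tx_packets"
 * "veb.tc_2_tx_bytes"
 * "veb.tc_2_rx_packets"
 * "veb.tc_2_rx_bytes"
 */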
+
+/**
* i40e_get_stat_strings - copy stat strings into supplied buffer
* @netdev: the netdev to collect strings for
* @data: supplied buffer to copy strings into
@@ -1833,16 +1986,10 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
unsigned int i;
u8 *p = data;
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_net_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_misc_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
+ i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
+
for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
data += ETH_GSTRING_LEN;
@@ -1856,52 +2003,15 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_veb_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_bytes", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_bytes", i);
- data += ETH_GSTRING_LEN;
- }
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_stats);
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
@@ -4535,7 +4645,6 @@ flags_complete:
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
- int i;
i40e_aq_stop_lldp(&pf->hw, true, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c944bd10b03d..f2c622e78802 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1800,6 +1800,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps);
break;
}
+ /* fall through */
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -6597,6 +6598,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
err = i40e_aq_set_phy_config(hw, &config, NULL);
if (err) {
@@ -7522,7 +7525,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -7554,7 +7557,7 @@ static int i40e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
- np, np);
+ np, np, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
return 0;
@@ -11841,7 +11844,6 @@ static int i40e_xdp(struct net_device *dev,
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0;
default:
@@ -11978,7 +11980,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
IFNAMSIZ - 4,
pf->vsi[pf->lan_vsi]->netdev->name);
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
@@ -14354,12 +14356,6 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3170655cdeb9..e08d754824b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -193,6 +193,9 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b151ae316546..b5042d1a63c0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2253,9 +2253,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
break;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ /* fall through -- handle aborts by dropping packet */
case XDP_DROP:
result = I40E_XDP_CONSUMED;
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c355120dfdfd..21a0dbf6ccf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -797,6 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aa81e87cd471..5fd8529465d4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -2175,6 +2175,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 9cef54971312..eea280ba411e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1021,75 +1021,6 @@ do_retry:
}
/**
- * i40evf_aq_set_phy_register
- * @hw: pointer to the hw struct
- * @phy_select: select which phy should be accessed
- * @dev_addr: PHY device address
- * @reg_addr: PHY register address
- * @reg_val: new register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-i40e_status i40evf_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_phy_register);
-
- cmd->phy_interface = phy_select;
- cmd->dev_address = dev_addr;
- cmd->reg_address = cpu_to_le32(reg_addr);
- cmd->reg_value = cpu_to_le32(reg_val);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_aq_get_phy_register
- * @hw: pointer to the hw struct
- * @phy_select: select which phy should be accessed
- * @dev_addr: PHY device address
- * @reg_addr: PHY register address
- * @reg_val: read register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-i40e_status i40evf_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_register);
-
- cmd->phy_interface = phy_select;
- cmd->dev_address = dev_addr;
- cmd->reg_address = cpu_to_le32(reg_addr);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
- *reg_val = le32_to_cpu(cmd->reg_value);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index a7b87f935411..5906c1c1d19d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b13b42e5a1d9..bafdcf70a353 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
- /* Make sure the PHY is in a good state. Several people have reported
- * firmware leaving the PHY's page select register set to something
- * other than the default of zero, which causes the PHY ID read to
- * access something other than the intended register.
- */
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
- }
-
/* Set phy->phy_addr and phy->id. */
- igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
@@ -1720,6 +1708,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = false;
+ /* fall through */
default:
if (hw->mac.type == e1000_82575 ||
hw->mac.type == e1000_82576) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 252440a418dc..8a28f3388f69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1048,6 +1048,22 @@
#define E1000_TQAVCTRL_XMIT_MODE BIT(0)
#define E1000_TQAVCTRL_DATAFETCHARB BIT(4)
#define E1000_TQAVCTRL_DATATRANARB BIT(8)
+#define E1000_TQAVCTRL_DATATRANTIM BIT(9)
+#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10)
+/* Fetch Time Delta - bits 31:16
+ *
+ * This field holds the value to be reduced from the launch time for
+ * fetch time decision. The FetchTimeDelta value is defined in 32 ns
+ * granularity.
+ *
+ * This field is 16 bits wide, and so the maximum value is:
+ *
+ * 65535 * 32 = 2097120 ~= 2.1 msec
+ *
+ * XXX: We are configuring the max value here since we couldn't come up
+ * with a reason for not doing so.
+ */
+#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16)
/* TX Qav Credit Control fields */
#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 2be0e762ec69..ad2125e5a7f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -659,6 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_AUTO_X_1000T;
break;
}
+ /* fall through */
case 0:
default:
phy_data |= M88E1000_PSCR_AUTO_X_MODE;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9643b5b3d444..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -262,6 +262,7 @@ struct igb_ring {
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
bool cbs_enable; /* indicates if CBS is enabled */
s32 idleslope; /* idleSlope in kbps */
s32 sendslope; /* sendSlope in kbps */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f707709969ac..221a735dc956 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1654,33 +1654,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
wr32(E1000_I210_TQAVCC(queue), val);
}
+static bool is_any_cbs_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->cbs_enable)
+ return true;
+ }
+
+ return false;
+}
+
+static bool is_any_txtime_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
/**
- * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
+ * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
* @adapter: pointer to adapter struct
* @queue: queue number
- * @enable: true = enable CBS, false = disable CBS
- * @idleslope: idleSlope in kbps
- * @sendslope: sendSlope in kbps
- * @hicredit: hiCredit in bytes
- * @locredit: loCredit in bytes
*
- * Configure CBS for a given hardware queue. When disabling, idleslope,
- * sendslope, hicredit, locredit arguments are ignored. Returns 0 if
- * success. Negative otherwise.
+ * Configure CBS and Launchtime for a given hardware queue.
+ * Parameters are retrieved from the correct Tx ring, so
+ * igb_save_cbs_params() and igb_save_txtime_params() should be used
+ * for setting those correctly prior to this function being called.
**/
-static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
- bool enable, int idleslope, int sendslope,
- int hicredit, int locredit)
+static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
+ struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- u32 tqavcc;
+ u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
- if (enable || queue == 0) {
+ /* If any of the Qav features is enabled, configure queues as SR and
+ * with HIGH PRIO. If none is, then configure them with LOW PRIO and
+ * as SP.
+ */
+ if (ring->cbs_enable || ring->launchtime_enable) {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+ set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ } else {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
+ set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
+ }
+
+ /* If CBS is enabled, set DataTranARB and config its parameters. */
+ if (ring->cbs_enable || queue == 0) {
/* i210 does not allow the queue 0 to be in the Strict
* Priority mode while the Qav mode is enabled, so,
* instead of disabling strict priority mode, we give
@@ -1690,14 +1722,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* Queue0 QueueMode must be set to 1b when
* TransmitMode is set to Qav."
*/
- if (queue == 0 && !enable) {
+ if (queue == 0 && !ring->cbs_enable) {
/* max "linkspeed" idleslope in kbps */
- idleslope = 1000000;
- hicredit = ETH_FRAME_LEN;
+ ring->idleslope = 1000000;
+ ring->hicredit = ETH_FRAME_LEN;
}
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
- set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ /* Always set data transfer arbitration to credit-based
+ * shaper algorithm on TQAVCTRL if CBS is enabled for any of
+ * the queues.
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
/* According to i210 datasheet section 7.2.7.7, we should set
* the 'idleSlope' field from TQAVCC register following the
@@ -1756,17 +1793,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* calculated value, so the resulting bandwidth might
* be slightly higher for some configurations.
*/
- value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
+ value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
tqavcc = rd32(E1000_I210_TQAVCC(queue));
tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
tqavcc |= value;
wr32(E1000_I210_TQAVCC(queue), tqavcc);
- wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
+ wr32(E1000_I210_TQAVHC(queue),
+ 0x80000000 + ring->hicredit * 0x7735);
} else {
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
- set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
/* Set idleSlope to zero. */
tqavcc = rd32(E1000_I210_TQAVCC(queue));
@@ -1775,6 +1811,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
/* Set hiCredit to zero. */
wr32(E1000_I210_TQAVHC(queue), 0);
+
+ /* If CBS is not enabled for any queues anymore, then return to
+ * the default state of Data Transmission Arbitration on
+ * TQAVCTRL.
+ */
+ if (!is_any_cbs_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
+ }
+
+ /* If LaunchTime is enabled, set DataTranTIM. */
+ if (ring->launchtime_enable) {
+ /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
+ * for any of the SR queues, and configure fetchtime delta.
+ * XXX NOTE:
+ * - LaunchTime will be enabled for all SR queues.
+ * - A fixed offset can be added relative to the launch
+ * time of all packets if configured at reg LAUNCH_OS0.
+ * We are keeping it as 0 for now (default value).
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
+ E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ } else {
+ /* If Launchtime is not enabled for any SR queues anymore,
+ * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
+ * effectively disabling Launchtime.
+ */
+ if (!is_any_txtime_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
+ tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
}
/* XXX: In i210 controller the sendSlope and loCredit parameters from
@@ -1782,9 +1855,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
- (enable) ? "enabled" : "disabled", queue,
- idleslope, sendslope, hicredit, locredit);
+ netdev_dbg(netdev,
+ "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+ (ring->cbs_enable) ? "enabled" : "disabled",
+ (ring->launchtime_enable) ? "enabled" : "disabled", queue,
+ ring->idleslope, ring->sendslope, ring->hicredit,
+ ring->locredit);
+}
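Worked numbers for the idleSlope scaling used in igb_config_tx_modes() above: a 20 Mbps reservation arrives as ring->idleslope = 20000 (kbps), so

	value = DIV_ROUND_UP_ULL(20000 * 61034ULL, 1000000)
	      = DIV_ROUND_UP(1220680000, 1000000)
	      = 1221

and 1221 is the scaled idleSlope written into TQAVCC(queue); the round-up is why the datasheet-derived comment notes the resulting bandwidth can be slightly higher than requested.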
+
+static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igb_ring *ring;
+
+ if (queue < 0 || queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ return 0;
}
static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
@@ -1807,21 +1898,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
return 0;
}
-static bool is_any_cbs_enabled(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = adapter->tx_ring[i];
-
- if (ring->cbs_enable)
- return true;
- }
-
- return false;
-}
-
+/**
+ * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
+ * @adapter: pointer to adapter struct
+ *
+ * Configure TQAVCTRL register switching the controller's Tx mode
+ * if FQTSS mode is enabled or disabled. Additionally, will issue
+ * a call to igb_config_tx_modes() per queue so any previously saved
+ * Tx parameters are applied.
+ **/
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1836,11 +1921,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
int i, max_queue;
/* Configure TQAVCTRL register: set transmit mode to 'Qav',
- * set data fetch arbitration to 'round robin' and set data
- * transfer arbitration to 'credit shaper algorithm.
+ * set data fetch arbitration to 'round robin', set SP_WAIT_SR
+ * so SP queues wait for SR ones.
*/
val = rd32(E1000_I210_TQAVCTRL);
- val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
+ val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
val &= ~E1000_TQAVCTRL_DATAFETCHARB;
wr32(E1000_I210_TQAVCTRL, val);
@@ -1881,11 +1966,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
adapter->num_tx_queues : I210_SR_QUEUES_NUM;
for (i = 0; i < max_queue; i++) {
- struct igb_ring *ring = adapter->tx_ring[i];
-
- igb_configure_cbs(adapter, i, ring->cbs_enable,
- ring->idleslope, ring->sendslope,
- ring->hicredit, ring->locredit);
+ igb_config_tx_modes(adapter, i);
}
} else {
wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
@@ -2459,6 +2540,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
+{
+ if (!is_fqtss_enabled(adapter)) {
+ enable_fqtss(adapter, true);
+ return;
+ }
+
+ igb_config_tx_modes(adapter, queue);
+
+ if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
+ enable_fqtss(adapter, false);
+}
+
static int igb_offload_cbs(struct igb_adapter *adapter,
struct tc_cbs_qopt_offload *qopt)
{
@@ -2479,17 +2573,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
if (err)
return err;
- if (is_fqtss_enabled(adapter)) {
- igb_configure_cbs(adapter, qopt->queue, qopt->enable,
- qopt->idleslope, qopt->sendslope,
- qopt->hicredit, qopt->locredit);
-
- if (!is_any_cbs_enabled(adapter))
- enable_fqtss(adapter, false);
-
- } else {
- enable_fqtss(adapter, true);
- }
+ igb_offload_apply(adapter, qopt->queue);
return 0;
}
@@ -2698,7 +2782,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2728,7 +2812,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
adapter);
@@ -2738,6 +2822,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
}
}
+static int igb_offload_txtime(struct igb_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* Launchtime offloading is only supported by i210 controller. */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ /* Launchtime offloading is only supported by queues 0 and 1. */
+ if (qopt->queue < 0 || qopt->queue > 1)
+ return -EINVAL;
+
+ err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ igb_offload_apply(adapter, qopt->queue);
+
+ return 0;
+}
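From userspace this path is reached through the ETF (earliest txtime first) qdisc's offload flag; a hedged example invocation, where the device name, qdisc handles, and delta value are assumptions for illustration:

/* tc qdisc replace dev eth0 parent 100:1 etf \
 *        clockid CLOCK_TAI delta 300000 offload
 *
 * The "offload" keyword is what triggers TC_SETUP_QDISC_ETF and lands
 * in igb_offload_txtime() above.
 */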
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -2748,6 +2855,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
return igb_setup_tc_block(adapter, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return igb_offload_txtime(adapter, type_data);
default:
return -EOPNOTSUPP;
@@ -5067,6 +5176,7 @@ bool igb_has_link(struct igb_adapter *adapter)
case e1000_media_type_copper:
if (!hw->mac.get_link_status)
return true;
+ /* fall through */
case e1000_media_type_internal_serdes:
hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
@@ -5568,11 +5678,14 @@ set_itr_now:
}
}
-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
+ struct timespec64 ts;
context_desc = IGB_TX_CTXTDESC(tx_ring, i);
@@ -5587,9 +5700,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
mss_l4len_idx |= tx_ring->reg_idx << 4;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ /* We assume there is always a valid tx time available. Invalid times
+ * should have been handled by the upper layers.
+ */
+ if (tx_ring->launchtime_enable) {
+ ts = ns_to_timespec64(first->skb->tstamp);
+ context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
+ } else {
+ context_desc->seqnum_seed = 0;
+ }
}
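Units check for the launch time programmed above: skb->tstamp carries nanoseconds, and the i210 expects the launch offset in the 32 ns granularity described for E1000_TQAVCTRL_FETCHTIME_DELTA, hence the division:

/* e.g. ts.tv_nsec == 3200 ns is programmed as 3200 / 32 = 100 units */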
static int igb_tso(struct igb_ring *tx_ring,
@@ -5672,7 +5794,8 @@ static int igb_tso(struct igb_ring *tx_ring,
vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
return 1;
}
@@ -5714,6 +5837,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
+ /* fall through */
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -5727,7 +5851,7 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
#define IGB_SET_FLAG(_input, _flag, _result) \
@@ -5909,7 +6033,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
* We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
- wmb();
+ dma_wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -6015,8 +6139,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -6032,6 +6154,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
+ skb_tx_timestamp(skb);
+
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
@@ -8409,7 +8533,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
writel(i, rx_ring->tail);
}
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f818f060e5a7..e0c989ffb2b3 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2102,6 +2102,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
+ /* fall through */
default:
skb_checksum_help(skb);
goto csum_failed;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 144d5fe6b944..4fc906c6166b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bd1ba88ec1d5..e5a8461fe6a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1139
+#define IXGBE_REGS_LEN 1145
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* X540 specific DCB registers */
regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
+
+ /* Security config registers */
+ regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
- struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_ctl;
-
- /* shut down the DMA engines now so they can be reinitialized later */
+ /* Shut down the DMA engines now so they can be reinitialized later,
+ * since the test rings and normally used rings should overlap on
+ * queue 0 we can just use the standard disable Rx/Tx calls and they
+ * will take care of disabling the test rings for us.
+ */
/* first Rx */
- hw->mac.ops.disable_rx(hw);
- ixgbe_disable_rx_queue(adapter, rx_ring);
+ ixgbe_disable_rx(adapter);
/* now Tx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
- reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
-
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- reg_ctl &= ~IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
ixgbe_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62e57b05a0ae..447098005490 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4022,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBE_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- if (ixgbe_removed(hw->hw_addr))
- return;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
-
- if (hw->mac.type == ixgbe_mac_82598EB &&
- !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
- return;
-
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
- if (!wait_loop) {
- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
- "the polling period\n", reg_idx);
- }
-}
-
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -4063,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
- /* disable queue to avoid issues while updating state */
+ /* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- ixgbe_disable_rx_queue(adapter, ring);
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -5275,6 +5247,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
struct net_device *vdev = accel->netdev;
int i, baseq, err;
@@ -5286,6 +5260,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+ i, rss_i, baseq + (rss_i * i));
+
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = vdev;
@@ -5310,6 +5289,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
@@ -5622,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
ixgbe_up_complete(adapter);
}
+static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
+{
+ u16 devctl2;
+
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
+
+ switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ /* For now we cap the upper limit on delay to 2 seconds
+ * as we end up going up to 34 seconds of delay in worst
+ * case timeout value.
+ */
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ return 2000000ul; /* 2.0 s */
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ return 520000ul; /* 520 ms */
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ return 130000ul; /* 130 ms */
+ case IXGBE_PCIDEVCTRL2_16_32ms:
+ return 32000ul; /* 32 ms */
+ case IXGBE_PCIDEVCTRL2_1_2ms:
+ return 2000ul; /* 2 ms */
+ case IXGBE_PCIDEVCTRL2_50_100us:
+ return 100ul; /* 100 us */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def:
+ return 32000ul; /* 32 ms */
+ default:
+ break;
+ }
+
+ /* We shouldn't need to hit this path, but just in case, default to
+ * 32 ms as though completion timeout were not supported.
+ */
+ return 32000ul;
+}
+
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 rxdctl;
+
+ /* disable receives */
+ hw->mac.ops.disable_rx(hw);
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ }
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting, however as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * of that wait is that it totals up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = 0;
+
+ /* OR together the reading of all the active RXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ }
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv,
+ "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+}
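Arithmetic behind the back-off loop above: wait_delay starts at one delay_interval and grows by 2 * delay_interval per pass, so across the IXGBE_MAX_RX_DESC_POLL (10 in this driver) iterations the total sleep is

	(1 + 3 + 5 + 7 + 9 + 11 + 13 + 15 + 17 + 19) * delay_interval
	= 100 * delay_interval

which equals the PCIe completion timeout, since delay_interval was derived by dividing that timeout by 100.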
+
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 txdctl;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Tx queues */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* disable all enabled XDP Tx queues */
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* If the link is not up there shouldn't be much in the way of
+ * pending transactions. Those that are left will be flushed out
+ * when the reset logic goes through the flush sequence to clean out
+ * the pending Tx transactions.
+ */
+ if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ goto dma_engine_disable;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting, however as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * of that wait is that it totals up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = 0;
+
+ /* OR together the reading of all the active TXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ goto dma_engine_disable;
+ }
+
+ e_err(drv,
+ "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+
+dma_engine_disable:
+	/* Disable the Tx DMA engine on 82599 and later MACs */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ /* fall through */
+ default:
+ break;
+ }
+}
+
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5803,24 +5992,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
return; /* do nothing if already down */
- /* disable receives */
- hw->mac.ops.disable_rx(hw);
+	/* Stop the stack from handing us new Tx traffic */
+ netif_tx_stop_all_queues(netdev);
- /* disable all enabled rx queues */
- for (i = 0; i < adapter->num_rx_queues; i++)
- /* this call also flushes the previous write */
- ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
- usleep_range(10000, 20000);
+ /* Disable Rx */
+ ixgbe_disable_rx(adapter);
/* synchronize_sched() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_sched();
- netif_tx_stop_all_queues(netdev);
-
- /* call carrier off first to avoid false dev_watchdog timeouts */
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
ixgbe_irq_disable(adapter);
@@ -5848,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/* disable transmits in the hardware now that interrupts are off */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- u8 reg_idx = adapter->tx_ring[i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
-
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
-
- /* Disable the Tx DMA engine on 82599 and later MAC */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
@@ -6458,6 +6619,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (adapter->xdp_prog) {
+ e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
+ return -EPERM;
+ }
+
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
* paths when MTU greater than 1500 is configured. So display a
@@ -8197,25 +8363,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
input, common, ring->queue_index);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
- struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
struct ixgbe_adapter *adapter;
- int txq;
-#ifdef IXGBE_FCOE
struct ixgbe_ring_feature *f;
-#endif
+ int txq;
- if (fwd_adapter) {
- adapter = netdev_priv(dev);
- txq = reciprocal_scale(skb_get_hash(skb),
- adapter->num_rx_queues_per_pool);
+ if (sb_dev) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ struct net_device *vdev = sb_dev;
- return txq + fwd_adapter->tx_base_queue;
- }
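+		/* Pick a queue inside the tc_to_txq range that the macvlan
+		 * was bound to: start at the range's offset and hash-spread
+		 * across its queue count.
+		 */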
+ txq = vdev->tc_to_txq[tc].offset;
+ txq += reciprocal_scale(skb_get_hash(skb),
+ vdev->tc_to_txq[tc].count);
-#ifdef IXGBE_FCOE
+ return txq;
+ }
/*
* only execute the code below if protocol is FCoE
@@ -8226,11 +8392,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
/* fall through */
default:
- return fallback(dev, skb);
+ return fallback(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -8242,11 +8408,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= f->indices;
return txq + f->offset;
-#else
- return fallback(dev, skb);
-#endif
}
+#endif
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf)
{
@@ -8766,6 +8930,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
/* if we cannot find a free pool then disable the offload */
netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
macvlan_release_l2fw_offload(vdev);
+
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
kfree(accel);
return 0;
@@ -9329,7 +9498,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
adapter);
@@ -9393,6 +9562,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
+ if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
+ e_dev_err("LRO is not supported with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+
return features;
}
@@ -9769,6 +9943,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!macvlan_supports_dest_filter(vdev))
return ERR_PTR(-EMEDIUMTYPE);
+ /* We need to lock down the macvlan to be a single queue device so that
+ * we can reuse the tc_to_txq field in the macvlan netdev to represent
+ * the queue mapping to our netdev.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool == adapter->num_rx_pools) {
u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9825,6 +10006,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
return ERR_PTR(-ENOMEM);
set_bit(pool, adapter->fwd_bitmask);
+ netdev_set_sb_channel(vdev, pool);
accel->pool = pool;
accel->netdev = vdev;
@@ -9866,6 +10048,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
ring->netdev = NULL;
}
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(pdev, accel->netdev);
+ netdev_set_sb_channel(accel->netdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
}
@@ -9966,7 +10152,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
@@ -10026,7 +10211,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
- .ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -10049,6 +10233,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
+ .ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59416eddd840..d86446d202d5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbevf_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 06ff185eb188..a5ab6f3403ae 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1911,10 +1911,10 @@ jme_wait_link(struct jme_adapter *jme)
{
u32 phylink, to = JME_WAIT_LINK_TIME;
- mdelay(1000);
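+	/* sleeping waits yield the CPU instead of busy-looping like the
+	 * replaced mdelay(); this assumes jme_wait_link() runs in process
+	 * context, as the conversion to sleeping waits implies.
+	 */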
+ msleep(1000);
phylink = jme_linkstat_from_phy(jme);
while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
- mdelay(10);
+ usleep_range(10000, 11000);
phylink = jme_linkstat_from_phy(jme);
}
}
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index afc810069440..7a637b51c7d2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- /* we are currently only using the first queue */
- return 0;
-}
-
static int
ltq_etop_init(struct net_device *dev)
{
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
- .ndo_select_queue = ltq_etop_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_init = ltq_etop_init,
.ndo_tx_timeout = ltq_etop_tx_timeout,
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0ad2f3f7da85..bc80a678abc3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -295,10 +295,10 @@
#define MVNETA_RSS_LU_TABLE_SIZE 1
/* Max number of Rx descriptors */
-#define MVNETA_MAX_RXD 128
+#define MVNETA_MAX_RXD 512
/* Max number of Tx descriptors */
-#define MVNETA_MAX_TXD 532
+#define MVNETA_MAX_TXD 1024
/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100
@@ -328,6 +328,8 @@
enum {
ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_STAT_SKB_ALLOC_ERR,
+ ETHTOOL_STAT_REFILL_ERR,
ETHTOOL_MAX_STATS,
};
@@ -375,6 +377,8 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
+ { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
+ { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};
struct mvneta_pcpu_stats {
@@ -479,7 +483,10 @@ struct mvneta_port {
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
-#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_LAST_DESC BIT(26)
+#define MVNETA_RXD_FIRST_DESC BIT(27)
+#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
+ MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
@@ -589,9 +596,6 @@ struct mvneta_rx_queue {
/* num of rx descriptors in the rx descriptor ring */
int size;
- /* counter of times when mvneta_refill() failed */
- int missed;
-
u32 pkts_coal;
u32 time_coal;
@@ -609,6 +613,18 @@ struct mvneta_rx_queue {
/* Index of the next RX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* Index of first RX DMA descriptor to refill */
+ int first_to_refill;
+ u32 refill_num;
+
+	/* pointer to incomplete skb buffer */
+ struct sk_buff *skb;
+ int left_size;
+
+ /* error counters */
+ u32 skb_alloc_err;
+ u32 refill_err;
};
static enum cpuhp_state online_hpstate;
@@ -621,6 +637,7 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
+static int rx_header_size __read_mostly = 128;
/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;
@@ -1684,13 +1701,6 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(status)) {
- netdev_err(pp->dev,
- "bad rx status %08x (buffer oversize), size=%d\n",
- status, rx_desc->data_size);
- return;
- }
-
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -1715,7 +1725,8 @@ static void mvneta_rx_error(struct mvneta_port *pp,
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((status & MVNETA_RXD_L3_IP4) &&
+ if ((pp->dev->features & NETIF_F_RXCSUM) &&
+ (status & MVNETA_RXD_L3_IP4) &&
(status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1790,47 +1801,30 @@ static void mvneta_txq_done(struct mvneta_port *pp,
}
}
-void *mvneta_frag_alloc(unsigned int frag_size)
-{
- if (likely(frag_size <= PAGE_SIZE))
- return netdev_alloc_frag(frag_size);
- else
- return kmalloc(frag_size, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
-void mvneta_frag_free(unsigned int frag_size, void *data)
-{
- if (likely(frag_size <= PAGE_SIZE))
- skb_free_frag(data);
- else
- kfree(data);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
/* Refill processing for SW buffer management */
+/* Allocate a page per descriptor */
static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
- struct mvneta_rx_queue *rxq)
-
+ struct mvneta_rx_queue *rxq,
+ gfp_t gfp_mask)
{
dma_addr_t phys_addr;
- void *data;
+ struct page *page;
- data = mvneta_frag_alloc(pp->frag_size);
- if (!data)
+ page = __dev_alloc_page(gfp_mask);
+ if (!page)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, data,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
+ /* map page for use */
+ phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(page);
return -ENOMEM;
}
phys_addr += pp->rx_offset_correction;
- mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
+ mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
return 0;
}
@@ -1893,115 +1887,192 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
void *data = rxq->buf_virt_addr[i];
+ if (!data || !(rx_desc->buf_phys_addr))
+ continue;
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(data);
}
}
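+/* Batch-refill descriptors whose buffers were handed to the stack
+ * (those with buf_phys_addr == 0), at most 64 per call to bound the
+ * time spent in the Rx path. Returns the number of descriptors ready
+ * to be handed back to the hardware.
+ */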
+static inline
+int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
+{
+ struct mvneta_rx_desc *rx_desc;
+ int curr_desc = rxq->first_to_refill;
+ int i;
+
+ for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
+ rx_desc = rxq->descs + curr_desc;
+ if (!(rx_desc->buf_phys_addr)) {
+ if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ pr_err("Can't refill queue %d. Done %d from %d\n",
+ rxq->id, i, rxq->refill_num);
+ rxq->refill_err++;
+ break;
+ }
+ }
+ curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
+ }
+ rxq->refill_num -= i;
+ rxq->first_to_refill = curr_desc;
+
+ return i;
+}
+
/* Main rx processing when using software buffer management */
-static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
- int rx_done;
+ int rx_todo, rx_proc;
+ int refill = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
/* Get number of received packets */
- rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
-
- if (rx_todo > rx_done)
- rx_todo = rx_done;
-
- rx_done = 0;
+ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ rx_proc = 0;
/* Fairness NAPI loop */
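+	/* A frame may span several descriptors: the FIRST descriptor seeds
+	 * rxq->skb with a copied header, MIDDLE/LAST descriptors attach
+	 * their pages as frags, and the skb is passed up the stack only
+	 * once the LAST descriptor has been processed.
+	 */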
- while (rx_done < rx_todo) {
+ while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- struct sk_buff *skb;
unsigned char *data;
+ struct page *page;
dma_addr_t phys_addr;
- u32 rx_status, frag_size;
- int rx_bytes, err, index;
+ u32 rx_status, index;
+ int rx_bytes, skb_size, copy_size;
+ int frag_num, frag_size, frag_offset;
- rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
index = rx_desc - rxq->descs;
- data = rxq->buf_virt_addr[index];
- phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
-
- if (!mvneta_rxq_desc_is_first_last(rx_status) ||
- (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- mvneta_rx_error(pp, rx_desc);
-err_drop_frame:
- dev->stats.rx_errors++;
- /* leave the descriptor untouched */
- continue;
- }
+ page = (struct page *)rxq->buf_virt_addr[index];
+ data = page_address(page);
+ /* Prefetch header */
+ prefetch(data);
- if (rx_bytes <= rx_copybreak) {
- /* better copy a small frame and not unmap the DMA region */
- skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
- if (unlikely(!skb))
- goto err_drop_frame;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr,
- MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes,
- DMA_FROM_DEVICE);
- skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
-
- skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
-
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ phys_addr = rx_desc->buf_phys_addr;
+ rx_status = rx_desc->status;
+ rx_proc++;
+ rxq->refill_num++;
+
+ if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ /* Check errors only for FIRST descriptor */
+ if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
+ mvneta_rx_error(pp, rx_desc);
+ dev->stats.rx_errors++;
+ /* leave the descriptor untouched */
+ continue;
+ }
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+
+ /* Allocate small skb for each new packet */
+ skb_size = max(rx_copybreak, rx_header_size);
+ rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ continue;
+ }
+ copy_size = min(skb_size, rx_bytes);
+
+ /* Copy data from buffer to SKB, skip Marvell header */
+ memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
+ copy_size);
+ skb_put(rxq->skb, copy_size);
+ rxq->left_size = rx_bytes - copy_size;
+
+ mvneta_rx_csum(pp, rx_status, rxq->skb);
+ if (rxq->left_size == 0) {
+ int size = copy_size + MVNETA_MH_SIZE;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = 0;
+ frag_offset = copy_size + MVNETA_MH_SIZE;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+				dma_unmap_page(dev->dev.parent, phys_addr,
+					       PAGE_SIZE, DMA_FROM_DEVICE);
+ rxq->left_size -= frag_size;
+ }
+ } else {
+ /* Middle or Last descriptor */
+ if (unlikely(!rxq->skb)) {
+ pr_debug("no skb for rx_status 0x%x\n",
+ rx_status);
+ continue;
+ }
+ if (!rxq->left_size) {
+				/* last descriptor has only FCS and can be
+				 * discarded
+				 */
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ ETH_FCS_LEN,
+ DMA_FROM_DEVICE);
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = skb_shinfo(rxq->skb)->nr_frags;
+ frag_offset = 0;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+
+				dma_unmap_page(dev->dev.parent, phys_addr,
+					       PAGE_SIZE, DMA_FROM_DEVICE);
+
+ rxq->left_size -= frag_size;
+ }
+ } /* Middle or Last descriptor */
- /* leave the descriptor and buffer untouched */
+ if (!(rx_status & MVNETA_RXD_LAST_DESC))
+ /* no last descriptor this time */
continue;
- }
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc, rxq);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- goto err_drop_frame;
+ if (rxq->left_size) {
+ pr_err("get last desc, but left_size (%d) != 0\n",
+ rxq->left_size);
+ dev_kfree_skb_any(rxq->skb);
+ rxq->left_size = 0;
+ rxq->skb = NULL;
+ continue;
}
-
- frag_size = pp->frag_size;
-
- skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
-
- /* After refill old buffer has to be unmapped regardless
- * the skb is successfully built or not.
- */
- dma_unmap_single(dev->dev.parent, phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
-
- if (!skb)
- goto err_drop_frame;
-
rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ rcvd_bytes += rxq->skb->len;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
- skb_put(skb, rx_bytes);
-
- skb->protocol = eth_type_trans(skb, dev);
+ rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
+ if (dev->features & NETIF_F_GRO)
+ napi_gro_receive(napi, rxq->skb);
+ else
+ netif_receive_skb(rxq->skb);
- napi_gro_receive(&port->napi, skb);
+		/* clear the incomplete skb pointer in the queue */
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
if (rcvd_pkts) {
@@ -2013,17 +2084,20 @@ err_drop_frame:
u64_stats_update_end(&stats->syncp);
}
+	/* return some buffers to the hardware queue; refilling one at a time is too slow */
+ refill = mvneta_rx_refill_queue(pp, rxq);
+
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rx_done;
+ return rcvd_pkts;
}
/* Main rx processing when using hardware buffer management */
-static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_hwbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -2085,7 +2159,7 @@ err_drop_frame:
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2102,7 +2176,7 @@ err_drop_frame:
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
+ rxq->refill_err++;
goto err_drop_frame_ret_pool;
}
@@ -2129,7 +2203,7 @@ err_drop_frame:
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
}
if (rcvd_pkts) {
@@ -2722,9 +2796,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
- rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_hwbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
else
- rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_swbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
}
if (rx_done < budget) {
@@ -2761,9 +2837,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
- if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
- netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
- __func__, rxq->id, i, num);
+ if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+ GFP_KERNEL) != 0) {
+ netdev_err(pp->dev,
+ "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
break;
}
}
@@ -2821,21 +2899,23 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
- /* Set Offset */
- mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
-
/* Set coalescing pkts and time */
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
if (!pp->bm_priv) {
- /* Fill RXQ with buffers from RX pool */
- mvneta_rxq_buf_size_set(pp, rxq,
- MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+ mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq,
+ NET_SKB_PAD - pp->rx_offset_correction);
+
mvneta_rxq_bm_enable(pp, rxq);
+ /* Fill RXQ with buffers from RX pool */
mvneta_rxq_long_pool_set(pp, rxq);
mvneta_rxq_short_pool_set(pp, rxq);
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
@@ -2864,6 +2944,9 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
{
mvneta_rxq_drop_pkts(pp, rxq);
+ if (rxq->skb)
+ dev_kfree_skb_any(rxq->skb);
+
if (rxq->descs)
dma_free_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2874,6 +2957,10 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
rxq->descs_phys = 0;
+ rxq->first_to_refill = 0;
+ rxq->refill_num = 0;
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
static int mvneta_txq_sw_init(struct mvneta_port *pp,
@@ -3177,8 +3264,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_bm_update_mtu(pp, mtu);
pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -3194,7 +3279,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
- mvneta_port_up(pp);
netdev_update_features(dev);
@@ -3666,8 +3750,7 @@ static int mvneta_open(struct net_device *dev)
int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ pp->frag_size = PAGE_SIZE;
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -3962,6 +4045,12 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
break;
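+		/* the skb-alloc and refill error counters are per-queue,
+		 * but are currently reported for rxq 0 only
+		 */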
+ case ETHTOOL_STAT_SKB_ALLOC_ERR:
+ val = pp->rxqs[0].skb_alloc_err;
+ break;
+ case ETHTOOL_STAT_REFILL_ERR:
+ val = pp->rxqs[0].refill_err;
+ break;
}
break;
}
@@ -4018,13 +4107,18 @@ static int mvneta_config_rss(struct mvneta_port *pp)
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- /* We have to synchronise on the napi of each CPU */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *pcpu_port =
- per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_synchronize(&pcpu_port->napi);
- napi_disable(&pcpu_port->napi);
+ napi_synchronize(&pcpu_port->napi);
+ napi_disable(&pcpu_port->napi);
+ }
+ } else {
+ napi_synchronize(&pp->napi);
+ napi_disable(&pp->napi);
}
pp->rxq_def = pp->indir[0];
@@ -4041,12 +4135,16 @@ static int mvneta_config_rss(struct mvneta_port *pp)
mvneta_percpu_elect(pp);
spin_unlock(&pp->lock);
- /* We have to synchronise on the napi of each CPU */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *pcpu_port =
- per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_enable(&pcpu_port->napi);
+ napi_enable(&pcpu_port->napi);
+ }
+ } else {
+ napi_enable(&pp->napi);
}
netif_tx_start_all_queues(pp->dev);
@@ -4362,14 +4460,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dn = dn;
pp->rxq_def = rxq_def;
-
- /* Set RX packet offset correction for platforms, whose
- * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
- * platforms and 0B for 32-bit ones.
- */
- pp->rx_offset_correction =
- max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
-
pp->indir[0] = rxq_def;
/* Get special SoC configurations */
@@ -4457,16 +4547,28 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
+ pp->rx_offset_correction = 0; /* not relevant for SW BM */
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
- if (bm_node && bm_node->data) {
- pp->bm_priv = bm_node->data;
- err = mvneta_bm_port_init(pdev, pp);
- if (err < 0) {
- dev_info(&pdev->dev, "use SW buffer management\n");
- pp->bm_priv = NULL;
+ if (bm_node) {
+ pp->bm_priv = mvneta_bm_get(bm_node);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev,
+ "use SW buffer management\n");
+ mvneta_bm_put(pp->bm_priv);
+ pp->bm_priv = NULL;
+ }
}
+		/* Set RX packet offset correction for platforms whose
+		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
+ * platforms and 0B for 32-bit ones.
+ */
+ pp->rx_offset_correction = max(0,
+ NET_SKB_PAD -
+ MVNETA_RX_PKT_OFFSET_CORRECTION);
}
of_node_put(bm_node);
@@ -4526,6 +4628,7 @@ err_netdev:
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
err_free_stats:
free_percpu(pp->stats);
@@ -4563,6 +4666,7 @@ static int mvneta_remove(struct platform_device *pdev)
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 466939f8f0cf..de468e1bdba9 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -392,6 +393,20 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv)
MVNETA_BM_BPPI_SIZE);
}
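+/* Resolve a "buffer-manager" phandle to its mvneta_bm instance. Note
+ * that of_find_device_by_node() takes a reference on the underlying
+ * device, which mvneta_bm_put() releases via platform_device_put().
+ */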
+struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_get);
+
+void mvneta_bm_put(struct mvneta_bm *priv)
+{
+ platform_device_put(priv->pdev);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_put);
+
static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index a32de432800c..c8425d35c049 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -130,10 +130,10 @@ struct mvneta_bm_pool {
};
/* Declarations and definitions */
-void *mvneta_frag_alloc(unsigned int frag_size);
-void mvneta_frag_free(unsigned int frag_size, void *data);
-
#if IS_ENABLED(CONFIG_MVNETA_BM)
+struct mvneta_bm *mvneta_bm_get(struct device_node *node);
+void mvneta_bm_put(struct mvneta_bm *priv);
+
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
@@ -178,5 +178,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool)
{ return 0; }
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
#endif /* CONFIG_MVNETA_BM */
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 4d11dd9e3246..51f65a202c6e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,4 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
+mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index def00dc3eb4e..67b9e81b7c02 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_H_
#define _MVPP2_H_
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -66,15 +64,18 @@
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240
+#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244
+#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0)
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
-#define MVPP22_RSS_TABLE_ENTRY 0x1508
-#define MVPP22_RSS_TABLE 0x1510
+#define MVPP22_RXQ2RSS_TABLE 0x1504
#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
#define MVPP22_RSS_WIDTH 0x150c
/* Classifier Registers */
@@ -86,11 +87,28 @@
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16)
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0)
+#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL0_OFFS 1
+#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23)
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
+#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15)
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6)
+#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
@@ -98,6 +116,32 @@
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+/* Classifier C2 engine Registers */
+#define MVPP22_CLS_C2_TCAM_IDX 0x1b00
+#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10
+#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14
+#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
+#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
+#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_HIT_CTR 0x1b50
+#define MVPP22_CLS_C2_ACT 0x1b60
+#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
+#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
+#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
+#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ATTR0 0x1b64
+#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
+#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
+#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24
+#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21)
+#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7
+#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21
+#define MVPP22_CLS_C2_ATTR1 0x1b68
+#define MVPP22_CLS_C2_ATTR2 0x1b6c
+#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
+#define MVPP22_CLS_C2_ATTR3 0x1b70
+
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
@@ -275,6 +319,11 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+/* Hit counters registers */
+#define MVPP2_CTRS_IDX 0x7040
+#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
+
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
@@ -499,7 +548,7 @@
#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
/* Default number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ 4
+#define MVPP2_DEFAULT_RXQ 1
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX 1024
@@ -553,6 +602,11 @@
((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
+#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES 32
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
@@ -703,6 +757,9 @@ struct mvpp2 {
/* Workqueue to gather hardware statistics */
char queue_name[30];
struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+ struct dentry *dbgfs_dir;
};
struct mvpp2_pcpu_stats {
@@ -795,6 +852,9 @@ struct mvpp2_port {
bool has_tx_irqs;
u32 tx_time_coal;
+
+ /* RSS indirection table */
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -831,52 +891,52 @@ struct mvpp2_port {
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+ __le32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
- u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_dma_addr; /* physical addr of transmitted buffer */
- u32 buf_cookie; /* cookie for access to TX buffer in tx path */
- u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
- u32 reserved2; /* reserved (for future use) */
+ __le16 data_size; /* data size of transmitted packet in bytes */
+ __le32 buf_dma_addr; /* physical addr of transmitted buffer */
+ __le32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ __le32 reserved2; /* reserved (for future use) */
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
- u32 status; /* info about received packet */
- u16 reserved1; /* parser_info (for future use, PnC) */
- u16 data_size; /* size of received packet in bytes */
- u32 buf_dma_addr; /* physical address of the buffer */
- u32 buf_cookie; /* cookie for access to RX buffer in rx path */
- u16 reserved2; /* gem_port_id (for future use, PON) */
- u16 reserved3; /* csum_l4 (for future use, PnC) */
+ __le32 status; /* info about received packet */
+ __le16 reserved1; /* parser_info (for future use, PnC) */
+ __le16 data_size; /* size of received packet in bytes */
+ __le32 buf_dma_addr; /* physical address of the buffer */
+ __le32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ __le16 reserved2; /* gem_port_id (for future use, PON) */
+ __le16 reserved3; /* csum_l4 (for future use, PnC) */
u8 reserved4; /* bm_qset (for future use, BM) */
u8 reserved5;
- u16 reserved6; /* classify_info (for future use, PnC) */
- u32 reserved7; /* flow_id (for future use, PnC) */
- u32 reserved8;
+ __le16 reserved6; /* classify_info (for future use, PnC) */
+ __le32 reserved7; /* flow_id (for future use, PnC) */
+ __le32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
- u32 command;
+ __le32 command;
u8 packet_offset;
u8 phys_txq;
- u16 data_size;
- u64 reserved1;
- u64 buf_dma_addr_ptp;
- u64 buf_cookie_misc;
+ __le16 data_size;
+ __le64 reserved1;
+ __le64 buf_dma_addr_ptp;
+ __le64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
- u32 status;
- u16 reserved1;
- u16 data_size;
- u32 reserved2;
- u32 reserved3;
- u64 buf_dma_addr_key_hash;
- u64 buf_cookie_misc;
+ __le32 status;
+ __le16 reserved1;
+ __le16 data_size;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le64 buf_dma_addr_key_hash;
+ __le64 buf_cookie_misc;
};
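+/* Descriptor fields are little-endian as produced and consumed by the
+ * hardware; the __le types let sparse flag any missing cpu_to_leXX()
+ * or leXX_to_cpu() conversion on big-endian kernels.
+ */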
/* Opaque type used by the driver to manipulate the HW TX and RX
@@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
u32 data);
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8581d5b17dd5..efdb7a656835 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1,17 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RSS and Classifier helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include "mvpp2.h"
#include "mvpp2_cls.h"
+#include "mvpp2_prs.h"
+
+#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
+{ \
+ .flow_type = _type, \
+ .flow_id = _id, \
+ .supported_hash_opts = _opts, \
+ .prs_ri = { \
+ .ri = _ri, \
+ .ri_mask = _ri_mask \
+ } \
+}
+
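+/* Each entry binds an ethtool flow type to the parser result bits
+ * (ri/ri_mask) that identify the traffic and to the header fields
+ * usable as RSS hash inputs for it.
+ */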
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+ /* TCP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* IPv4 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv4 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+		       MVPP2_PRS_RI_L3_IP6_EXT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* Non IP flow, no vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+ 0,
+ MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK),
+ /* Non IP flow, with vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+ MVPP22_CLS_HEK_OPT_VLAN,
+ 0, 0),
+};
+
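+/* Reading a hit counter is an indirect, two-step access: write the
+ * entry index to the shared counter index register, then read the
+ * count back from the corresponding hit counter register.
+ */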
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ fe->index = index;
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
+ fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
+}
/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
@@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ le->way = way;
+ le->lkpid = lkpid;
+ le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
+}
+
/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
struct mvpp2_cls_lookup_entry *le)
@@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
+/* Operations on flow entry */
+static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+}
+
+static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
+ int num_of_fields)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
+}
+
+static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
+ int field_index)
+{
+ return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK;
+}
+
+static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
+ int field_index, int field_id)
+{
+ fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK);
+ fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
+}
+
+static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
+ int engine)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
+}
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
+ MVPP2_CLS_FLOW_TBL0_ENG_MASK;
+}
+
+static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
+ bool from_packet)
+{
+ if (from_packet)
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+ else
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+}
+
+static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
+}
+
+static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
+ bool is_last)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
+ fe->data[0] |= !!is_last;
+}
+
+static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
+}
+
+static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+/* Initialize the parser entry for the given flow */
+static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
+ flow->prs_ri.ri_mask);
+}
+
+/* Initialize the Lookup Id table entry for the given flow */
+static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_lookup_entry le;
+
+ le.way = 0;
+ le.lkpid = flow->flow_id;
+
+ /* The default RxQ for this port is set in the C2 lookup */
+ le.data = 0;
+
+ /* We point to the first lookup in the sequence for this flow, which is
+ * the C2 lookup.
+ */
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+
+ /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
+ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ mvpp2_cls_lookup_write(priv, &le);
+}
+
+/* Initialize the flow table entries for the given flow */
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_flow_entry fe;
+ int i;
+
+ /* C2 lookup */
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_last_set(&fe, 0);
+ mvpp2_cls_flow_pri_set(&fe, 0);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+
+ /* Add all ports */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++)
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+
+ /* C3Hx lookups */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_pri_set(&fe, i + 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Update the last entry */
+ mvpp2_cls_flow_last_set(&fe, 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
+
+ mvpp2_cls_flow_write(priv, &fe);
+}
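+
+/* Resulting per-flow layout in the flow table (sketch): one C2 entry shared
+ * by all ports, followed by one C3Hx hash entry per port; the last hash
+ * entry carries the LAST bit so the lookup sequence stops there.
+ *
+ * [C2_ENTRY(id)] -> [HASH_ENTRY(port 0, id)] ... [HASH_ENTRY(port n, id), LAST]
+ */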
+
+/* Add a field to the Header Extracted Key generation parameters */
+static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
+ u32 field_id)
+{
+ int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ if (nb_fields == MVPP2_FLOW_N_FIELDS)
+ return -EINVAL;
+
+ mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
+
+ mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
+
+ return 0;
+}
+
+static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
+ unsigned long hash_opts)
+{
+ u32 field_id;
+ int i;
+
+ /* Clear old fields */
+ mvpp2_cls_flow_hek_num_set(fe, 0);
+ fe->data[2] = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ field_id = MVPP22_CLS_FIELD_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ field_id = MVPP22_CLS_FIELD_IP4SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ field_id = MVPP22_CLS_FIELD_IP4DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ field_id = MVPP22_CLS_FIELD_IP6SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ field_id = MVPP22_CLS_FIELD_IP6DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ field_id = MVPP22_CLS_FIELD_L4SIP;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ field_id = MVPP22_CLS_FIELD_L4DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (mvpp2_flow_add_hek_field(fe, field_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
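+
+/* Illustrative sketch only, not part of this patch: program the HEK for an
+ * IPv4 5-tuple. This is equivalent to four mvpp2_flow_add_hek_field() calls;
+ * a fifth field would make that helper return -EINVAL, since at most
+ * MVPP2_FLOW_N_FIELDS (4) fields fit in data[2].
+ */
+static inline int mvpp2_flow_hek_ip4_5t_sketch(struct mvpp2_cls_flow_entry *fe)
+{
+ return mvpp2_flow_set_hek_fields(fe, MVPP22_CLS_HEK_IP4_5T);
+}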
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
+}
+
+/* Set the hash generation options for the given traffic flow.
+ * One traffic flow (in the ethtool sense) has multiple classification flows,
+ * to handle specific cases such as fragmentation, or the presence of a
+ * VLAN / DSA Tag.
+ *
+ * Each of these individual flows has different constraints; for example, we
+ * can't hash fragmented packets on L4 data (else we would risk packet
+ * re-ordering), so each classification flow masks the requested options with
+ * the ones it supports.
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+ u16 requested_opts)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, engine, flow_index;
+ u16 hash_opts;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return -EINVAL;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = flow->supported_hash_opts & requested_opts;
+
+ /* Use the C3HB engine to access L4 info. This adds the L4 fields
+ * to the hash parameters.
+ */
+ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+ engine = MVPP22_CLS_ENGINE_C3HB;
+ else
+ engine = MVPP22_CLS_ENGINE_C3HA;
+
+ if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+ return -EINVAL;
+
+ mvpp2_cls_flow_eng_set(&fe, engine);
+
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ return 0;
+}
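+
+/* Illustrative usage sketch, not part of this patch: restrict TCPv4 hashing
+ * to the address 2-tuple. Fragmented variants of the flow keep masking out
+ * the L4 options through their supported_hash_opts, as described above.
+ */
+static inline int mvpp2_rss_tcp4_2t_sketch(struct mvpp2_port *port)
+{
+ return mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW,
+ MVPP22_CLS_HEK_IP4_2T);
+}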
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+ u16 hash_opts = 0;
+ int n_fields, i, field;
+
+ n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ for (i = 0; i < n_fields; i++) {
+ field = mvpp2_cls_flow_hek_get(fe, i);
+
+ switch (field) {
+ case MVPP22_CLS_FIELD_MAC_DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ break;
+ case MVPP22_CLS_FIELD_VLAN:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ break;
+ case MVPP22_CLS_FIELD_L3_PROTO:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ break;
+ case MVPP22_CLS_FIELD_IP4SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+ break;
+ case MVPP22_CLS_FIELD_IP4DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+ break;
+ case MVPP22_CLS_FIELD_IP6SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+ break;
+ case MVPP22_CLS_FIELD_IP6DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+ break;
+ case MVPP22_CLS_FIELD_L4SIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ break;
+ case MVPP22_CLS_FIELD_L4DIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ break;
+ default:
+ break;
+ }
+ }
+ return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow; this returns an aggregation of all their
+ * configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, flow_index;
+ u16 hash_opts = 0;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+ }
+
+ return hash_opts;
+}
+
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_flow *flow;
+ int i;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ break;
+
+ mvpp2_cls_flow_prs_init(priv, flow);
+ mvpp2_cls_flow_lkp_init(priv, flow);
+ mvpp2_cls_flow_init(priv, flow);
+ }
+}
+
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ /* Write TCAM */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+
+ memset(&c2, 0, sizeof(c2));
+
+ c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Update RSS status after matching this entry */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ /* Configure the default rx queue: update Queue Low and Queue High, but
+ * don't lock, since the rx queue selection might be overridden by RSS
+ */
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
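+
+/* Illustrative sketch only, not part of this patch: recover the default rxq
+ * programmed above. The queue number is split across the QHIGH and QLOW
+ * attributes, e.g. first_rxq = 12 gives qh = 1, ql = 4, and (1 << 3) | 4 == 12.
+ */
+static inline u8 mvpp2_cls_c2_default_rxq_sketch(struct mvpp2_cls_c2_entry *c2)
+{
+ u8 qh = (c2->attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ u8 ql = (c2->attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ return (qh << 3) | ql;
+}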
+
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
@@ -61,6 +839,8 @@ void mvpp2_cls_init(struct mvpp2 *priv)
le.way = 1;
mvpp2_cls_lookup_write(priv, &le);
}
+
+ mvpp2_cls_port_init_flows(priv);
}
void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port)
/* Update lookup ID table entry */
mvpp2_cls_lookup_write(port->priv, &le);
+
+ mvpp2_port_c2_cls_init(port);
+}
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
+
+ return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
+}
+
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+void mvpp22_rss_enable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_enable(port);
+}
+
+void mvpp22_rss_disable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_disable(port);
}
/* Set CPU queue number for oversize packets */
@@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
-void mvpp22_init_rss(struct mvpp2_port *port)
+static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
+{
+ int nrxqs, cpu, cpus = num_possible_cpus();
+
+ /* Number of RXQs per CPU */
+ nrxqs = port->nrxqs / cpus;
+
+ /* CPU that will handle this rx queue */
+ cpu = rxq / nrxqs;
+
+ if (!cpu_online(cpu))
+ return port->first_rxq;
+
+ /* Indirection to better distribute the packets on the CPUs when
+ * configuring the RSS queues.
+ */
+ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
+}
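+
+/* Worked example for the indirection above (illustrative numbers): with
+ * 4 CPUs and port->nrxqs = 16, each CPU owns nrxqs = 4 queues. Table
+ * entries 1, 2, 3 and 4 then map to first_rxq + 4, + 8, + 12 and + 1
+ * respectively, so consecutive hash buckets land on different CPUs.
+ */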
+
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
+ mvpp22_rxfh_indir(port, port->indir[i]));
+ }
+}
+
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ u16 hash_opts = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ if (info->data & RXH_L4_B_0_1)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ if (info->data & RXH_L4_B_2_3)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ /* Fallthrough */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (info->data & RXH_L2DA)
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ if (info->data & RXH_VLAN)
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ if (info->data & RXH_L3_PROTO)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ if (info->data & RXH_IP_SRC)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
+ MVPP22_CLS_HEK_OPT_IP6SA);
+ if (info->data & RXH_IP_DST)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
+ MVPP22_CLS_HEK_OPT_IP6DA);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+}
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ unsigned long hash_opts;
+ int i;
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+ info->data = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ info->data |= RXH_L2DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ info->data |= RXH_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_L3_PROTO:
+ info->data |= RXH_L3_PROTO;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ info->data |= RXH_IP_SRC;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ info->data |= RXH_IP_DST;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ info->data |= RXH_L4_B_0_1;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ info->data |= RXH_L4_B_2_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void mvpp22_rss_port_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
@@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port)
/* Set the table width: replace the whole classifier Rx queue number
* with the ones configured in RSS table entries.
*/
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
- /* Loop through the classifier Rx Queues and map them to a RSS table.
- * Map them all to the first table (0) by default.
+ /* The default RxQ is used as a key to select the RSS table to use.
+ * We use one RSS table per port.
*/
- for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
- mvpp2_write(priv, MVPP22_RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(0));
- }
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->id));
/* Configure the first table to evenly distribute the packets across
- * real Rx Queues. The table entries map a hash to an port Rx Queue.
+ * real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
- for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
- MVPP22_RSS_INDEX_TABLE_ENTRY(i);
- mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
+ port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
- }
+ mvpp22_rss_fill_table(port, port->id);
+ /* Configure default flows */
+ mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8e1d7f9ffa0b..089f05f29891 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -1,27 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RSS and Classifier definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_CLS_H_
#define _MVPP2_CLS_H_
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
#define MVPP2_CLS_RX_QUEUES 256
-/* RSS constants */
-#define MVPP22_RSS_TABLE_ENTRIES 32
+/* Classifier flow constants */
+
+#define MVPP2_FLOW_N_FIELDS 4
+
+enum mvpp2_cls_engine {
+ MVPP22_CLS_ENGINE_C2 = 1,
+ MVPP22_CLS_ENGINE_C3A,
+ MVPP22_CLS_ENGINE_C3B,
+ MVPP22_CLS_ENGINE_C4,
+ MVPP22_CLS_ENGINE_C3HA = 6,
+ MVPP22_CLS_ENGINE_C3HB = 7,
+};
+
+#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS 9
+
+#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
+ MVPP22_CLS_HEK_OPT_L4DIP)
+
+#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \
+ MVPP22_CLS_HEK_OPT_IP4DA)
+
+#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \
+ MVPP22_CLS_HEK_OPT_IP6DA)
+
+/* The fifth tuple in "5T" is the L4_Info field */
+#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+enum mvpp2_cls_field_id {
+ MVPP22_CLS_FIELD_MAC_DA = 0x03,
+ MVPP22_CLS_FIELD_VLAN = 0x06,
+ MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
+ MVPP22_CLS_FIELD_IP4SA = 0x10,
+ MVPP22_CLS_FIELD_IP4DA = 0x11,
+ MVPP22_CLS_FIELD_IP6SA = 0x17,
+ MVPP22_CLS_FIELD_IP6DA = 0x1a,
+ MVPP22_CLS_FIELD_L4SIP = 0x1d,
+ MVPP22_CLS_FIELD_L4DIP = 0x1e,
+};
+
+enum mvpp2_cls_flow_seq {
+ MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+ MVPP2_CLS_FLOW_SEQ_FIRST1,
+ MVPP2_CLS_FLOW_SEQ_FIRST2,
+ MVPP2_CLS_FLOW_SEQ_LAST,
+ MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
+/* Classifier C2 engine constants */
+#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
+
+enum mvpp22_cls_c2_action {
+ MVPP22_C2_NO_UPD = 0,
+ MVPP22_C2_NO_UPD_LOCK,
+ MVPP22_C2_UPD,
+ MVPP22_C2_UPD_LOCK,
+};
+
+enum mvpp22_cls_c2_fwd_action {
+ MVPP22_C2_FWD_NO_UPD = 0,
+ MVPP22_C2_FWD_NO_UPD_LOCK,
+ MVPP22_C2_FWD_SW,
+ MVPP22_C2_FWD_SW_LOCK,
+ MVPP22_C2_FWD_HW,
+ MVPP22_C2_FWD_HW_LOCK,
+ MVPP22_C2_FWD_HW_LOW_LAT,
+ MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
+};
+
+#define MVPP2_CLS_C2_TCAM_WORDS 5
+#define MVPP2_CLS_C2_ATTR_WORDS 5
+
+struct mvpp2_cls_c2_entry {
+ u32 index;
+ u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ u32 act;
+ u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+};
+
+/* Classifier C2 engine entries */
+#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
+#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
+ */
+#define MVPP22_RSS_FLOW_C2_OFFS 0
+#define MVPP22_RSS_FLOW_HASH_OFFS 1
+#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+
+#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+
+/* Packet flow ID */
+enum mvpp2_prs_flow {
+ MVPP2_FL_START = 8,
+ MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START,
+ MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+ MVPP2_FL_IP4_TAG,
+ MVPP2_FL_IP6_UNTAG,
+ MVPP2_FL_IP6_TAG,
+ MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_FL_NON_IP_TAG,
+ MVPP2_FL_LAST,
+};
+
+struct mvpp2_cls_flow {
+ /* The L2-L4 traffic flow type */
+ int flow_type;
+
+ /* The first id in the flow table for this flow */
+ u16 flow_id;
+
+ /* The supported HEK fields for this flow */
+ u16 supported_hash_opts;
+
+ /* The Header Parser result_info that matches this flow */
+ struct mvpp2_prs_result_info prs_ri;
+};
+
+#define MVPP2_N_FLOWS 52
+
+#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
+#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
+ (port) + 1)
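+
+/* Worked example (illustrative values): with MVPP2_MAX_PORTS == 4, each flow
+ * owns 5 consecutive flow table entries, so for flow_id 8 the C2 entry is
+ * MVPP2_FLOW_C2_ENTRY(8) == 40 and port 2's hash entry is
+ * MVPP2_PORT_FLOW_HASH_ENTRY(2, 8) == 43.
+ */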
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-void mvpp22_init_rss(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+
+void mvpp22_rss_port_init(struct mvpp2_port *port);
+
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
void mvpp2_cls_init(struct mvpp2 *priv);
@@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port);
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe);
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le);
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
new file mode 100644
index 000000000000..f9744a61e5dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2018 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
+static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits);
+
+static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+
+ u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
+
+static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+ const char *flow_name;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ switch (f->flow_type) {
+ case IPV4_FLOW:
+ flow_name = "ipv4";
+ break;
+ case IPV6_FLOW:
+ flow_name = "ipv6";
+ break;
+ case TCP_V4_FLOW:
+ flow_name = "tcp4";
+ break;
+ case TCP_V6_FLOW:
+ flow_name = "tcp6";
+ break;
+ case UDP_V4_FLOW:
+ flow_name = "udp4";
+ break;
+ case UDP_V6_FLOW:
+ flow_name = "udp6";
+ break;
+ default:
+ flow_name = "other";
+ }
+
+ seq_printf(s, "%s\n", flow_name);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+ .open = mvpp2_dbgfs_flow_type_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_flow_type_release,
+};
+
+static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", f->flow_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id);
+
+static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index;
+ u16 hash_opts;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = mvpp2_flow_get_hek_fields(&fe);
+
+ seq_printf(s, "0x%04x\n", hash_opts);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+ inode->i_private);
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+ .open = mvpp2_dbgfs_port_flow_hash_opt_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_port_flow_hash_opt_release,
+};
+
+static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index, engine;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ engine = mvpp2_cls_flow_eng_get(&fe);
+
+ seq_printf(s, "%d\n", engine);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
+
+static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ u32 hits;
+
+ hits = mvpp2_cls_c2_hit_count(port->priv,
+ MVPP22_CLS_C2_RSS_ENTRY(port->id));
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
+
+static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+
+ ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ seq_printf(s, "%d\n", (qh << 3 | ql));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
+
+static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ int enabled;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
+
+ seq_printf(s, "%d\n", enabled);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable);
+
+static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ unsigned char byte[2], enable[2];
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ u16 rvid;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (!priv->prs_shadow[tid].valid)
+ continue;
+
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+
+ seq_printf(s, "%u\n", rvid);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+
+static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ mvpp2_prs_init_from_hw(port->priv, &pe, i);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
+ seq_printf(s, "%03d\n", i);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser);
+
+static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC ||
+ priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+ seq_printf(s, "%pM\n", da);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter);
+
+static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+
+ seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu);
+
+static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned int pmap;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ pmap &= MVPP2_PRS_PORT_MASK;
+
+ seq_printf(s, "%02x\n", pmap);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap);
+
+static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char ai, ai_mask;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+ ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK;
+
+ seq_printf(s, "%02x %02x\n", ai, ai_mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai);
+
+static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char data[8], mask[8];
+ int i;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ for (i = 0; i < 8; i++)
+ mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]);
+
+ seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata);
+
+static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ seq_printf(s, "%*phN\n", 14, pe.sram);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram);
+
+static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ int val;
+
+ val = mvpp2_prs_hits(entry->priv, entry->tid);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, "%d\n", val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits);
+
+static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+ int tid = entry->tid;
+
+ seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_prs_entry *entry = seq->private;
+
+ kfree(entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+ .open = mvpp2_dbgfs_prs_valid_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_prs_valid_release,
+};
+
+static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
+ struct mvpp2_port *port,
+ struct mvpp2_dbgfs_flow_entry *entry)
+{
+ struct mvpp2_dbgfs_port_flow_entry *port_entry;
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ /* This will be freed by the 'hash_opts' release op */
+ port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+ if (!port_entry)
+ return -ENOMEM;
+
+ port_entry->port = port;
+ port_entry->dbg_fe = entry;
+
+ debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_hash_opt_fops);
+
+ debugfs_create_file("engine", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_engine_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_dbgfs_flow_entry *entry;
+ struct dentry *flow_entry_dir;
+ char flow_entry_name[10];
+ int i, ret;
+
+ sprintf(flow_entry_name, "%02d", flow);
+
+ flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+ if (!flow_entry_dir)
+ return -ENOMEM;
+
+ /* This will be freed by the 'type' release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->flow = flow;
+ entry->priv = priv;
+
+ debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_dec_hits_fops);
+
+ debugfs_create_file("type", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_type_fops);
+
+ debugfs_create_file("id", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_id_fops);
+
+ /* Create entry for each port */
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir,
+ priv->port_list[i], entry);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *flow_dir;
+ int i, ret;
+
+ flow_dir = debugfs_create_dir("flows", parent);
+ if (!flow_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int tid)
+{
+ struct mvpp2_dbgfs_prs_entry *entry;
+ struct dentry *prs_entry_dir;
+ char prs_entry_name[10];
+
+ if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ sprintf(prs_entry_name, "%03d", tid);
+
+ prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+ if (!prs_entry_dir)
+ return -ENOMEM;
+
+ /* This will be freed by the 'valid' entry's release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->tid = tid;
+ entry->priv = priv;
+
+ /* Create each attr */
+ debugfs_create_file("sram", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_sram_fops);
+
+ debugfs_create_file("valid", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_valid_fops);
+
+ debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_lu_fops);
+
+ debugfs_create_file("ai", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_ai_fops);
+
+ debugfs_create_file("header_data", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hdata_fops);
+
+ debugfs_create_file("hits", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *prs_dir;
+ int i, ret;
+
+ prs_dir = debugfs_create_dir("parser", parent);
+ if (!prs_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_init(struct dentry *parent,
+ struct mvpp2_port *port)
+{
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ debugfs_create_file("parser_entries", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_parser_fops);
+
+ debugfs_create_file("mac_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_filter_fops);
+
+ debugfs_create_file("vid_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_vid_fops);
+
+ debugfs_create_file("c2_hits", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
+{
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
+{
+ struct dentry *mvpp2_dir, *mvpp2_root;
+ int ret, i;
+
+ mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
+ if (!mvpp2_root) {
+ mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+ if (IS_ERR(mvpp2_root))
+ return;
+ }
+
+ mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+ if (IS_ERR(mvpp2_dir))
+ return;
+
+ priv->dbgfs_dir = mvpp2_dir;
+
+ ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ return;
+err:
+ mvpp2_dbgfs_cleanup(priv);
+}
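+
+/* Resulting debugfs layout (sketch; assumes MVPP2_DRIVER_NAME is "mvpp2",
+ * <name> being the platform device name passed to mvpp2_dbgfs_init()):
+ *
+ * /sys/kernel/debug/mvpp2/<name>/parser/<tid>/{valid,lookup_id,ai,header_data,sram,hits}
+ * /sys/kernel/debug/mvpp2/<name>/<port>/{parser_entries,mac_filter,vid_filter,c2_hits,default_rxq,rss_enable}
+ * /sys/kernel/debug/mvpp2/<name>/flows/<flow>/{flow_hits,dec_hits,type,id,<port>/{hash_opts,engine}}
+ */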
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0319ed9ef8b8..32d785b616e1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/acpi.h>
@@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
-static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+static int queue_mode = MVPP2_QDIST_MULTI_MODE;
module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
@@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
else
- return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
+ MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
offset = dma_addr & MVPP2_TX_DESC_ALIGN;
if (port->priv->hw_version == MVPP21) {
- tx_desc->pp21.buf_dma_addr = addr;
+ tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
tx_desc->pp21.packet_offset = offset;
} else {
- u64 val = (u64)addr;
+ __le64 val = cpu_to_le64(addr);
- tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+ tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
@@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.data_size;
+ return le16_to_cpu(tx_desc->pp21.data_size);
else
- return tx_desc->pp22.data_size;
+ return le16_to_cpu(tx_desc->pp22.data_size);
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
@@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
size_t size)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.data_size = size;
+ tx_desc->pp21.data_size = cpu_to_le16(size);
else
- tx_desc->pp22.data_size = size;
+ tx_desc->pp22.data_size = cpu_to_le16(size);
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
@@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
unsigned int command)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.command = command;
+ tx_desc->pp21.command = cpu_to_le32(command);
else
- tx_desc->pp22.command = command;
+ tx_desc->pp22.command = cpu_to_le32(command);
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
@@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
else
- return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
+ MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_cookie;
+ return le32_to_cpu(rx_desc->pp21.buf_cookie);
else
- return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
+ MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.data_size;
+ return le16_to_cpu(rx_desc->pp21.data_size);
else
- return rx_desc->pp22.data_size;
+ return le16_to_cpu(rx_desc->pp22.data_size);
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.status;
+ return le32_to_cpu(rx_desc->pp21.status);
else
- return rx_desc->pp22.status;
+ return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
command |= MVPP2_TXD_IP_CSUM_DISABLE;
- if (l3_proto == swab16(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
} else {
@@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
+static bool mvpp22_rss_is_supported(void)
+{
+ return queue_mode == MVPP2_QDIST_MULTI_MODE;
+}
+
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev)
mvpp2_start_dev(port);
- if (priv->hw_version == MVPP22)
- mvpp22_init_rss(port);
-
/* Start hardware statistics gathering */
queue_delayed_work(priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
@@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev,
}
}
+ if (changed & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ mvpp22_rss_enable(port);
+ else
+ mvpp22_rss_disable(port);
+ }
+
return 0;
}
@@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = mvpp2_ethtool_rxfh_get(port, info);
+ break;
+ case ETHTOOL_GRXRINGS:
+ info->data = port->nrxqs;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = mvpp2_ethtool_rxfh_set(port, info);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+}
+
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, port->indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ memcpy(port->indir, indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ mvpp22_rss_fill_table(port, port->id);
+ }
+
+ return 0;
+}
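+
+/* Illustrative usage note, not driver code: with these hooks wired up, the
+ * RSS setup can be exercised from userspace, e.g. "ethtool -X <iface>
+ * equal 4" rewrites the indirection table through mvpp2_ethtool_set_rxfh(),
+ * and "ethtool -N <iface> rx-flow-hash tcp4 sdfn" selects the hashed fields
+ * through mvpp2_ethtool_set_rxnfc().
+ */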
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rxnfc = mvpp2_ethtool_get_rxnfc,
+ .set_rxnfc = mvpp2_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
+ .get_rxfh = mvpp2_ethtool_get_rxfh,
+ .set_rxfh = mvpp2_ethtool_set_rxfh,
+
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
- if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
- (port->ntxqs > MVPP2_MAX_TXQ))
+ if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
+ port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
return -EINVAL;
/* Disable port */
@@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
+ if (mvpp22_rss_is_supported())
+ mvpp22_rss_port_init(port);
+
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (mvpp22_rss_is_supported())
+ dev->hw_features |= NETIF_F_RXHASH;
+
if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev)
(unsigned long)of_device_get_match_data(&pdev->dev);
}
+ /* Multi queue mode isn't supported on PPV2.1, fall back to single
+ * queue mode
+ */
+ if (priv->hw_version == MVPP21)
+ queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
@@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ mvpp2_dbgfs_init(priv, pdev->name);
+
platform_set_drvdata(pdev, priv);
return 0;
@@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev)
struct fwnode_handle *port_fwnode;
int i = 0;
+ mvpp2_dbgfs_cleanup(priv);
+
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 6bb69f086794..392fd895f278 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Header Parser helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
return -EINVAL;
/* Clear entry invalidation bit */
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
return 0;
}
/* Initialize tcam entry from hw */
-static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
- struct mvpp2_prs_entry *pe, int tid)
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
{
int i;
@@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
- if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
return MVPP2_PRS_TCAM_ENTRY_INVALID;
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+ pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+ pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
return 0;
}
@@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
- pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
unsigned int port, bool add)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
if (add)
- pe->tcam.byte[enable_off] &= ~(1 << port);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
else
- pe->tcam.byte[enable_off] |= 1 << port;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
unsigned int ports)
{
- unsigned char port_mask = MVPP2_PRS_PORT_MASK;
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
- pe->tcam.byte[enable_off] &= ~port_mask;
- pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
/* Obtain port map from tcam sw entry */
-static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+ return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
@@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
unsigned int offs, unsigned char byte,
unsigned char enable)
{
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
/* Get byte of data and its enable bits from tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
- unsigned int offs, unsigned char *byte,
- unsigned char *enable)
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable)
{
- *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
- *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
+ *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
u16 data)
{
- int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
- tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
- if (tcam_data != data)
- return false;
- return true;
+ tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
+ return tcam_data == data;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int enable)
{
- int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+ int i;
for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
if (!(enable & BIT(i)))
continue;
if (bits & BIT(i))
- pe->tcam.byte[ai_idx] |= 1 << i;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
else
- pe->tcam.byte[ai_idx] &= ~(1 << i);
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
}
- pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
- return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+ return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}
/* Set ethertype in tcam sw entry */
@@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Update ri bits in sram sw entry */
@@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
unsigned int i;
for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
- int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
-
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
}
@@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
- return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+ return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
@@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int mask)
{
unsigned int i;
- int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
}
@@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
u8 bits;
- int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
- int ai_en_off = ai_off + 1;
- int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+	/* ai is stored in bits 90..97, so it spans two u32 words */
+ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
- bits = (pe->sram.byte[ai_off] >> ai_shift) |
- (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+ bits = (pe->sram[ai_off] >> ai_shift) |
+ (pe->sram[ai_off + 1] << (32 - ai_shift));
return bits;
}
@@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
}
/* Set value */
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
- (unsigned char)shift;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
@@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set value */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
MVPP2_PRS_SRAM_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] |=
- (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ offset & MVPP2_PRS_SRAM_UDF_MASK);
/* Set offset type */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
@@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set offset operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
- (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
- (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
/* Set base offset as current */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
@@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
continue;
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
- mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
if (!match)
continue;
@@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
pe.index = tid;
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
@@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
pe.index = tid;
- /* Clear tcam data before updating */
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
-
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_IPV4_HEAD,
MVPP2_PRS_IPV4_HEAD_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
MVPP2_PRS_IPV4_IHL_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
return 0;
}
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ u8 *ri_byte, *ri_byte_mask;
+ int tid, i;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ ri_byte = (u8 *)&ri;
+ ri_byte_mask = (u8 *)&ri_mask;
+
+ mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ for (i = 0; i < 4; i++) {
+ mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
+ ri_byte_mask[i]);
+ }
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
@@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
return 0;
}
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+{
+ u32 val;
+
+	if (index < 0 || index >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+
+ val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+
+ val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+
+ return val;
+}
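
The reworked accessors above pack each pair of TCAM data bytes together with their enable bits into a single u32: data in the low 16 bits, enable in the high 16 (MVPP2_PRS_TCAM_EN() shifts by 16). Below is a minimal, standalone userspace sketch of that layout; the macro names mirror the driver's, but this is an illustration, not the driver code:

	#include <stdio.h>
	#include <stdint.h>

	#define PRS_BYTE_TO_WORD(b)	((b) / 2)
	#define PRS_BYTE_IN_WORD(b)	((b) % 2)
	#define PRS_TCAM_EN(data)	((uint32_t)(data) << 16)

	static void tcam_data_byte_set(uint32_t *tcam, unsigned int offs,
				       uint8_t byte, uint8_t enable)
	{
		int pos = PRS_BYTE_IN_WORD(offs) * 8;

		/* clear the old data byte and its enable byte, then set both */
		tcam[PRS_BYTE_TO_WORD(offs)] &= ~((uint32_t)0xff << pos);
		tcam[PRS_BYTE_TO_WORD(offs)] &= ~(PRS_TCAM_EN(0xff) << pos);
		tcam[PRS_BYTE_TO_WORD(offs)] |= (uint32_t)byte << pos;
		tcam[PRS_BYTE_TO_WORD(offs)] |= PRS_TCAM_EN((uint32_t)enable << pos);
	}

	int main(void)
	{
		uint32_t tcam[6] = { 0 };

		tcam_data_byte_set(tcam, 0, 0x88, 0xff);	/* word 0, bits 0..7  */
		tcam_data_byte_set(tcam, 1, 0xa8, 0xff);	/* word 0, bits 8..15 */
		printf("word0 = 0x%08x\n", tcam[0]);		/* 0xffffa888 */
		return 0;
	}

Words 4 and 5 carry the port map, AI bits and lookup ID in the same data-low/enable-high arrangement, which is why mvpp2_prs_tcam_port_map_get() reads the port enable bits starting at bit 24.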
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 22fbbc4c8b28..e22f6c85d380 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -1,22 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header Parser definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include "mvpp2.h"
-#ifndef _MVPP2_PRS_H_
-#define _MVPP2_PRS_H_
-
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
@@ -50,17 +48,25 @@
* The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
*/
#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_AI_MASK 0xff
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
-#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
- (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
-#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
- (((offs) * 2) - ((offs) % 2) + 2)
-#define MVPP2_PRS_TCAM_AI_BYTE 16
-#define MVPP2_PRS_TCAM_PORT_BYTE 17
-#define MVPP2_PRS_TCAM_LU_BYTE 20
-#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
-#define MVPP2_PRS_TCAM_INV_WORD 5
+
+/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
+#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
+#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2)
+
+#define MVPP2_PRS_TCAM_EN(data) ((data) << 16)
+#define MVPP2_PRS_TCAM_AI_WORD 4
+#define MVPP2_PRS_TCAM_AI(ai) (ai)
+#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai))
+#define MVPP2_PRS_TCAM_PORT_WORD 4
+#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8)
+#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
+#define MVPP2_PRS_TCAM_LU_WORD 5
+#define MVPP2_PRS_TCAM_LU(lu) (lu)
+#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu))
+#define MVPP2_PRS_TCAM_INV_WORD 5
#define MVPP2_PRS_VID_TCAM_BYTE 2
@@ -146,6 +152,7 @@
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
@@ -214,6 +221,10 @@
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \
+ MVPP2_PRS_RI_IP_FRAG_MASK | \
+ MVPP2_PRS_RI_L4_PROTO_MASK)
+
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
@@ -255,20 +266,15 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_LAST,
};
-union mvpp2_prs_tcam_entry {
- u32 word[MVPP2_PRS_TCAM_WORDS];
- u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
-};
-
-union mvpp2_prs_sram_entry {
- u32 word[MVPP2_PRS_SRAM_WORDS];
- u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
-};
-
struct mvpp2_prs_entry {
u32 index;
- union mvpp2_prs_tcam_entry tcam;
- union mvpp2_prs_sram_entry sram;
+ u32 tcam[MVPP2_PRS_TCAM_WORDS];
+ u32 sram[MVPP2_PRS_SRAM_WORDS];
+};
+
+struct mvpp2_prs_result_info {
+ u32 ri;
+ u32 ri_mask;
};
struct mvpp2_prs_shadow {
@@ -288,10 +294,21 @@ struct mvpp2_prs_shadow {
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid);
+
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe);
+
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable);
+
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask);
+
int mvpp2_prs_def_flow(struct mvpp2_port *port);
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
@@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
+int mvpp2_prs_hits(struct mvpp2 *priv, int index);
+
#endif
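
One subtlety the driver comments on: the SRAM "ai" field sits at bits 90..97 of the entry, so it straddles two u32 words and mvpp2_prs_sram_ai_get() has to stitch the halves back together. A self-contained userspace sketch of that read; the constants mirror MVPP2_PRS_SRAM_AI_OFFS and the MVPP2_BIT_TO_WORD()/MVPP2_BIT_IN_WORD() helpers, but names and the test value are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	#define AI_OFFS		90
	#define BIT_TO_WORD(b)	((b) / 32)
	#define BIT_IN_WORD(b)	((b) % 32)

	static uint8_t sram_ai_get(const uint32_t *sram)
	{
		int word  = BIT_TO_WORD(AI_OFFS);	/* word 2 */
		int shift = BIT_IN_WORD(AI_OFFS);	/* bit 26 */

		/* low 6 bits come from word 2, high 2 bits from word 3 */
		return (sram[word] >> shift) | (sram[word + 1] << (32 - shift));
	}

	int main(void)
	{
		uint32_t sram[4] = { 0 };
		uint8_t ai = 0xa5;

		sram[2] |= (uint32_t)(ai & 0x3f) << 26;	/* bits 90..95 */
		sram[3] |= ai >> 6;			/* bits 96..97 */
		printf("ai = 0x%02x\n", sram_ai_get(sram));	/* 0xa5 */
		return 0;
	}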
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d8ebf0a05e0c..6e6abdc399de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -605,10 +605,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC | __GFP_ZERO);
+ eth->scratch_ring = dma_zalloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -623,7 +623,6 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
@@ -1221,14 +1220,11 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sz,
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
- memset(ring->dma, 0, MTK_DMA_SIZE * sz);
for (i = 0; i < MTK_DMA_SIZE; i++) {
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
@@ -1321,10 +1317,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
@@ -2463,42 +2458,6 @@ free_netdev:
return err;
}
-static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
-{
- u32 val[2], id[4];
-
- regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
- regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
-
- id[3] = ((val[0] >> 16) & 0xff) - '0';
- id[2] = ((val[0] >> 24) & 0xff) - '0';
- id[1] = (val[1] & 0xff) - '0';
- id[0] = ((val[1] >> 8) & 0xff) - '0';
-
- *chip_id = (id[3] * 1000) + (id[2] * 100) +
- (id[1] * 10) + id[0];
-
- if (!(*chip_id)) {
- dev_err(eth->dev, "failed to get chip id\n");
- return -ENODEV;
- }
-
- dev_info(eth->dev, "chip id = %d\n", *chip_id);
-
- return 0;
-}
-
-static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
-{
- switch (eth->chip_id) {
- case MT7622_ETH:
- case MT7623_ETH:
- return true;
- }
-
- return false;
-}
-
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2577,11 +2536,7 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
return err;
- err = mtk_get_chip_id(eth, &eth->chip_id);
- if (err)
- return err;
-
- eth->hwlro = mtk_is_hwlro_supported(eth);
+ eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
for_each_child_of_node(pdev->dev.of_node, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2670,19 +2625,19 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
+ .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
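
The three mtk hunks above replace dma_alloc_coherent() plus explicit zeroing with dma_zalloc_coherent(). For reference, in kernels of this era the helper was, roughly (quoted from memory, so treat as a sketch), a thin wrapper that ORs in __GFP_ZERO:

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle, gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}

So the conversions are behavior-preserving: the zeroing simply moves into the allocator, which is why the memset() and __GFP_ZERO at the call sites can be dropped.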
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 672b8c353c47..46819297fc3e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -566,6 +566,7 @@ struct mtk_rx_ring {
#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
MTK_GMAC2_SGMII)
+#define MTK_HWLRO BIT(12)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
/* struct mtk_eth_data - This is the structure holding all differences
@@ -635,7 +636,6 @@ struct mtk_eth {
struct regmap *ethsys;
struct regmap *sgmiisys;
struct regmap *pctl;
- u32 chip_id;
bool hwlro;
refcount_t dma_refcnt;
struct mtk_tx_ring tx_ring;
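
With this change hwlro support comes from the per-SoC capability table instead of probing the chip ID at runtime. A small userspace model of the MTK_HAS_CAPS() test follows; note that a capability may itself be a multi-bit mask (e.g. MTK_DUAL_GMAC_SHARED_SGMII above), which is why the check is "all requested bits set" rather than a plain AND. Bit positions other than MTK_HWLRO are placeholders, not the driver's:

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n)			(1u << (n))
	#define CAP_TRGMII		BIT(0)	/* placeholder bit */
	#define CAP_HWLRO		BIT(12)	/* matches MTK_HWLRO above */
	#define HAS_CAPS(caps, x)	(((caps) & (x)) == (x))

	int main(void)
	{
		uint32_t soc_caps = CAP_TRGMII | CAP_HWLRO;	/* like mt7623_data */

		printf("hwlro: %d\n", HAS_CAPS(soc_caps, CAP_HWLRO));			/* 1 */
		printf("both:  %d\n", HAS_CAPS(soc_caps, CAP_TRGMII | CAP_HWLRO));	/* 1 */
		printf("other: %d\n", HAS_CAPS(soc_caps, BIT(3)));			/* 0 */
		return 0;
	}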
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 16b10d01fcf4..3f400770fcd8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
- srq.o resource_tracker.o
+ srq.o resource_tracker.o crdump.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index e2b6b0cac1ac..c81d15bf259c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
dev = persist->dev;
mlx4_err(dev, "device is going to be reset\n");
- if (mlx4_is_slave(dev))
+ if (mlx4_is_slave(dev)) {
err = mlx4_reset_slave(dev);
- else
+ } else {
+ mlx4_crdump_collect(dev);
err = mlx4_reset_master(dev);
+ }
if (!err) {
mlx4_err(dev, "device was reset successfully\n");
@@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
- err = mlx4_restart_one(persist->pdev);
+ err = mlx4_restart_one(persist->pdev, false, NULL);
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
new file mode 100644
index 000000000000..88316c743820
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4.h"
+
+#define BAD_ACCESS 0xBADACCE5
+#define HEALTH_BUFFER_SIZE 0x40
+#define CR_ENABLE_BIT swab32(BIT(6))
+#define CR_ENABLE_BIT_OFFSET 0xF3F04
+#define MAX_NUM_OF_DUMPS_TO_STORE (8)
+
+static const char *region_cr_space_str = "cr-space";
+static const char *region_fw_health_str = "fw-health";
+
+/* Set to true if the CR enable bit was set before the crdump */
+static bool crdump_enable_bit_set;
+
+static void crdump_enable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Get current enable bit value */
+	crdump_enable_bit_set =
+ readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT;
+
+ /* Enable FW CR filter (set bit6 to 0) */
+	if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+
+	/* Enable blocking of volatile crspace accesses */
+ writel(swab32(1), cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+}
+
+static void crdump_disable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+	/* Disable blocking of volatile crspace accesses */
+ writel(0, cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+
+ /* Restore FW CR filter value (set bit6 to original value) */
+	if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+}
+
+static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 *crspace_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_crspace) {
+ mlx4_err(dev, "crdump: cr-space region is NULL\n");
+ return;
+ }
+
+ /* Try to collect CR space */
+ cr_res_size = pci_resource_len(pdev, 0);
+ crspace_data = kvmalloc(cr_res_size, GFP_KERNEL);
+ if (crspace_data) {
+ for (offset = 0; offset < cr_res_size; offset += 4)
+ *(u32 *)(crspace_data + offset) =
+ readl(cr_space + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_crspace,
+ cr_res_size, crspace_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(crspace_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_cr_space_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_cr_space_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n");
+ }
+}
+
+static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ u8 *health_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_fw_health) {
+ mlx4_err(dev, "crdump: fw-health region is NULL\n");
+ return;
+ }
+
+ /* Try to collect health buffer */
+ health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL);
+ if (health_data) {
+ u8 __iomem *health_buf_start =
+ cr_space + dev->caps.health_buffer_addrs;
+
+ for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4)
+ *(u32 *)(health_data + offset) =
+ readl(health_buf_start + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_fw_health,
+ HEALTH_BUFFER_SIZE,
+ health_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(health_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_fw_health_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_fw_health_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate health buffer\n");
+ }
+}
+
+int mlx4_crdump_collect(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 __iomem *cr_space;
+ u32 id;
+
+ if (!dev->caps.health_buffer_addrs) {
+ mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n");
+ return 0;
+ }
+
+ if (!crdump->snapshot_enable) {
+ mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n");
+ return 0;
+ }
+
+ cr_res_size = pci_resource_len(pdev, 0);
+
+ cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size);
+ if (!cr_space) {
+ mlx4_err(dev, "crdump: Failed to map pci cr region\n");
+ return -ENODEV;
+ }
+
+ crdump_enable_crspace_access(dev, cr_space);
+
+ /* Get the available snapshot ID for the dumps */
+ id = devlink_region_shapshot_id_get(devlink);
+
+ /* Try to capture dumps */
+ mlx4_crdump_collect_crspace(dev, cr_space, id);
+ mlx4_crdump_collect_fw_health(dev, cr_space, id);
+
+ crdump_disable_crspace_access(dev, cr_space);
+
+ iounmap(cr_space);
+ return 0;
+}
+
+int mlx4_crdump_init(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ crdump->snapshot_enable = false;
+
+ /* Create cr-space region */
+ crdump->region_crspace =
+ devlink_region_create(devlink,
+ region_cr_space_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
+ if (IS_ERR(crdump->region_crspace))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_cr_space_str,
+ PTR_ERR(crdump->region_crspace));
+
+ /* Create fw-health region */
+ crdump->region_fw_health =
+ devlink_region_create(devlink,
+ region_fw_health_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
+ if (IS_ERR(crdump->region_fw_health))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_fw_health_str,
+ PTR_ERR(crdump->region_fw_health));
+
+ return 0;
+}
+
+void mlx4_crdump_end(struct mlx4_dev *dev)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+
+ devlink_region_destroy(crdump->region_fw_health);
+ devlink_region_destroy(crdump->region_crspace);
+}
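
The collectors above fill a kvmalloc'd buffer one 32-bit readl() at a time (an MMIO window generally cannot be memcpy'd safely) and then hand it to devlink_region_snapshot_create(), which takes ownership and frees it with the supplied kvfree; as the error paths show, the caller only frees on failure. A userspace sketch of just the capture loop, with readl_sim() standing in for readl() on the ioremap'd BAR:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* stand-in for readl() on an ioremap'd window */
	static uint32_t readl_sim(const volatile uint32_t *addr)
	{
		return *addr;
	}

	static void *snapshot_window(const volatile uint32_t *win, size_t bytes)
	{
		uint8_t *buf = malloc(bytes);
		size_t off;

		if (!buf)
			return NULL;
		/* copy the window one 32-bit word at a time */
		for (off = 0; off < bytes; off += 4)
			*(uint32_t *)(buf + off) = readl_sim(win + off / 4);
		return buf;
	}

	int main(void)
	{
		uint32_t fake_bar[4] = { 0xdeadbeef, 0x01020304, 0, 0xffffffff };
		uint32_t *snap = snapshot_window(fake_bar, sizeof(fake_bar));

		printf("snap[0] = 0x%08x\n", snap ? snap[0] : 0);
		free(snap);
		return 0;
	}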
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 65eb06e017e4..6785661d1a72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx4_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx4_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 3360f7b9ee73..a1aeeb8094c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -795,8 +795,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
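
The new /* fall through */ comments document that the cascade above is intentional, which keeps -Wimplicit-fallthrough quiet. A compilable model of the pattern used in the XDP verdict handling (simplified enum, not the real uapi values):

	#include <stdio.h>

	enum act { XDP_PASS, XDP_ABORTED, XDP_DROP };

	static int handle(enum act act)
	{
		switch (act) {
		default:
			fprintf(stderr, "invalid xdp action\n");
			/* fall through */
		case XDP_ABORTED:
			fprintf(stderr, "xdp exception\n");
			/* fall through */
		case XDP_DROP:
			return -1;	/* drop the packet */
		case XDP_PASS:
			return 0;
		}
	}

	int main(void)
	{
		return handle(XDP_PASS) == 0 && handle(XDP_DROP) == -1 ? 0 : 1;
	}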
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0227786308af..1857ee0f0871 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return fallback(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 46dcbfbe4c5e..babcfd9c0571 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
-
+#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->rl_caps.min_unit = size >> 14;
}
+ MLX4_GET(dev_cap->health_buffer_addrs, outbox,
+ QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);
+
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cd6399c76bfd..650ae08c71de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -128,6 +128,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 872014702fc1..d2d59444f562 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -159,9 +159,10 @@ static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
-int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (0-7) (default: 0)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
@@ -177,6 +178,131 @@ struct mlx4_port_config {
static atomic_t pf_loading = ATOMIC_INIT(0);
+static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ ctx->val.vbool = !!mlx4_internal_err_reset;
+ return 0;
+}
+
+static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ mlx4_internal_err_reset = ctx->val.vbool;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ ctx->val.vbool = dev->persist->crdump.snapshot_enable;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ dev->persist->crdump.snapshot_enable = ctx->val.vbool;
+ return 0;
+}
+
+static int
+mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ u32 value = val.vu32;
+
+ if (value < 1 || value > 128)
+ return -ERANGE;
+
+ if (!is_power_of_2(value)) {
+		NL_SET_ERR_MSG_MOD(extack, "max_macs must be a power of 2");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum mlx4_devlink_param_id {
+ MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+};
+
+static const struct devlink_param mlx4_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_ierr_reset_get,
+ mlx4_devlink_ierr_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx4_devlink_max_macs_validate),
+ DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_crdump_snapshot_get,
+ mlx4_devlink_crdump_snapshot_set, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ int err;
+
+ err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
+ if (err)
+ mlx4_warn(dev,
+ "devlink set parameter %u value failed (err = %d)",
+ param_id, err);
+}
+
+static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = !!mlx4_internal_err_reset;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
+
+ value.vu32 = 1UL << log_num_mac;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+
+ value.vbool = enable_64b_cqe_eqe;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
+
+ value.vbool = enable_4k_uar;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
+
+ value.vbool = false;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
+}
+
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
@@ -428,6 +554,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
dev->caps.wol_port[1] = dev_cap->wol_port[1];
dev->caps.wol_port[2] = dev_cap->wol_port[2];
+ dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
@@ -3711,10 +3838,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- err = mlx4_catas_init(&priv->dev);
+ err = mlx4_crdump_init(&priv->dev);
if (err)
goto err_release_regions;
+ err = mlx4_catas_init(&priv->dev);
+ if (err)
+ goto err_crdump;
+
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
if (err)
goto err_catas;
@@ -3724,6 +3855,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
err_catas:
mlx4_catas_end(&priv->dev);
+err_crdump:
+ mlx4_crdump_end(&priv->dev);
+
err_release_regions:
pci_release_regions(pdev);
@@ -3757,8 +3891,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
return __set_port_type(info, mlx4_port_type);
}
+static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
+ if (!err && mlx4_internal_err_reset != saved_value.vbool) {
+ mlx4_internal_err_reset = saved_value.vbool;
+ /* Notify on value changed on runtime configuration mode */
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ }
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ log_num_mac = order_base_2(saved_value.vu32);
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
+ if (!err)
+ enable_64b_cqe_eqe = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
+ if (!err)
+ enable_4k_uar = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
+ if (!err && crdump->snapshot_enable != saved_value.vbool) {
+ crdump->snapshot_enable = saved_value.vbool;
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ }
+}
+
+static int mlx4_devlink_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_dev_persistent *persist = dev->persist;
+ int err;
+
+ if (persist->num_vfs)
+		mlx4_warn(persist->dev, "Reload performed on PF, will cause reset of operating Virtual Functions\n");
+ err = mlx4_restart_one(persist->pdev, true, devlink);
+ if (err)
+ mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err);
+
+ return err;
+}
+
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
+ .reload = mlx4_devlink_reload,
};
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -3792,14 +3986,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ret = devlink_register(devlink, &pdev->dev);
if (ret)
goto err_persist_free;
-
- ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ ret = devlink_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
+ mlx4_devlink_set_params_init_values(devlink);
+ ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ if (ret)
+ goto err_params_unregister;
pci_save_state(pdev);
return 0;
+err_params_unregister:
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
devlink_unregister(devlink);
err_persist_free:
@@ -3929,6 +4130,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
else
mlx4_info(dev, "%s: interface is down\n", __func__);
mlx4_catas_end(dev);
+ mlx4_crdump_end(dev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
@@ -3936,6 +4138,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -3960,7 +4164,7 @@ static int restore_current_port_types(struct mlx4_dev *dev,
return err;
}
-int mlx4_restart_one(struct pci_dev *pdev)
+int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
@@ -3973,6 +4177,8 @@ int mlx4_restart_one(struct pci_dev *pdev)
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mlx4_unload_one(pdev);
+ if (reload)
+ mlx4_devlink_param_load_driverinit_values(devlink);
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
if (err) {
mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
@@ -4205,7 +4411,7 @@ static int __init mlx4_verify_params(void)
if (use_prio != 0)
pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
+ if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
log_mtts_per_seg);
return -1;
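
The devlink parameter code above distinguishes "runtime" from "driverinit" configuration modes: a driverinit value is only staged by devlink and takes effect when the driver's load path reads it back, which mlx4_devlink_param_load_driverinit_values() does during mlx4_restart_one(..., reload=true, ...). A toy userspace model of that two-phase flow; all names here are illustrative:

	#include <stdio.h>
	#include <stdbool.h>

	struct param {
		bool requested;		/* what userspace asked for */
		bool active;		/* what the driver is running with */
	};

	static void devlink_param_set(struct param *p, bool val)
	{
		p->requested = val;	/* staged only, no effect yet */
	}

	static void driver_load(struct param *p)
	{
		p->active = p->requested;	/* picked up on probe/reload */
	}

	int main(void)
	{
		struct param snapshot_enable = { false, false };

		devlink_param_set(&snapshot_enable, true);
		printf("before reload: %d\n", snapshot_enable.active);	/* 0 */
		driver_load(&snapshot_enable);	/* "devlink dev reload" */
		printf("after reload:  %d\n", snapshot_enable.active);	/* 1 */
		return 0;
	}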
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4c5306dbcf11..ffed2d4c9403 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1412,6 +1412,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
+ /* fall through */
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
@@ -1441,6 +1442,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
+ /* fall through */
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cb9e923e8399..ebcd2778eeb3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -84,7 +84,6 @@ enum {
MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -1042,7 +1041,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
-int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_crdump_init(struct mlx4_dev *dev);
+void mlx4_crdump_end(struct mlx4_dev *dev);
+int mlx4_restart_one(struct pci_dev *pdev, bool reload,
+ struct devlink *devlink);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
@@ -1227,6 +1229,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_comm_internal_err(u32 slave_read);
+int mlx4_crdump_collect(struct mlx4_dev *dev);
+
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ace6545f82e6..c3228b89df46 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index bae8b22edbb7..ba361c5fbda3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -105,7 +105,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
min(1UL << (31 - log_mtts_per_seg),
- si.totalram >> (log_mtts_per_seg - 1))));
+ (si.totalram << 1) >> log_mtts_per_seg)));
+
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
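
The rewritten bound is equivalent to the old si.totalram >> (log_mtts_per_seg - 1) whenever log_mtts_per_seg >= 1, but it stays well defined now that the parameter defaults to 0, where the old expression would shift by -1. A worked example of the new bound, standalone and with a simplified roundup_pow_of_two(); the sample values are made up:

	#include <stdio.h>

	static unsigned long roundup_pow_of_two(unsigned long x)
	{
		unsigned long r = 1;

		while (r < x)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned long log_mtts_per_seg = 0;
		unsigned long totalram = 4UL << 20;	/* 4M pages = 16 GB with 4K pages */
		unsigned long requested = 1UL << 20;
		unsigned long cap, num_mtt;

		/* min(2^(31 - log), 2 * totalram >> log), as in the hunk above */
		cap = 1UL << (31 - log_mtts_per_seg);
		if (((totalram << 1) >> log_mtts_per_seg) < cap)
			cap = (totalram << 1) >> log_mtts_per_seg;

		num_mtt = roundup_pow_of_two(requested > cap ? requested : cap);
		printf("num_mtt = %lu\n", num_mtt);	/* 8388608, i.e. 2 * totalram */
		return 0;
	}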
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 2545296a0c08..37a551436e4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,10 +3,11 @@
#
config MLX5_CORE
- tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
+ tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on MAY_USE_DEVLINK
depends on PCI
imply PTP_1588_CLOCK
+ imply VXLAN
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -27,7 +28,7 @@ config MLX5_FPGA
sandbox-specific client drivers.
config MLX5_CORE_EN
- bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
select PAGE_POOL
@@ -35,6 +36,24 @@ config MLX5_CORE_EN
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+config MLX5_EN_ARFS
+ bool "Mellanox MLX5 ethernet accelerated receive flow steering (ARFS) support"
+ depends on MLX5_CORE_EN && RFS_ACCEL
+ default y
+ ---help---
+	  Mellanox MLX5 ethernet hardware-accelerated receive flow steering support.
+	  Enables Ethernet netdevice ARFS support and ntuple filtering.
+
+config MLX5_EN_RXNFC
+ bool "Mellanox MLX5 ethernet rx nfc flow steering support"
+ depends on MLX5_CORE_EN
+ default y
+ ---help---
+	  Mellanox MLX5 ethernet rx nfc flow steering support.
+	  Enables ethtool receive network flow classification, which allows user-defined
+	  flow rules to direct traffic into arbitrary RX queues via the ethtool
+	  set/get_rxnfc API.
+
config MLX5_MPFS
bool "Mellanox Technologies MLX5 MPFS support"
depends on MLX5_CORE_EN
@@ -69,7 +88,7 @@ config MLX5_CORE_EN_DCB
If unsure, set to Y
config MLX5_CORE_IPOIB
- bool "Mellanox Technologies ConnectX-4 IPoIB offloads support"
+ bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
depends on MLX5_CORE_EN
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9efbf193ad5a..d324a3884462 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,33 +1,61 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+#
+# Makefile for Mellanox 5th generation network adapters
+# (ConnectX series) core & netdev driver
+#
+
subdir-ccflags-y += -I$(src)
+obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+
+#
+# mlx5 core basic
+#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
- diag/fs_tracepoint.o
-
-mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
-
-mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
- fpga/ipsec.o fpga/tls.o
+ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ diag/fs_tracepoint.o diag/fw_tracer.o
+#
+# Netdev basic
+#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
- en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
-
-mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o
+ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
+ en_selftest.o en/port.o
+
+#
+# Netdev extra
+#
+mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
+mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
+
+#
+# Core extra
+#
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
+mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
+mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+
+#
+# Ipoib netdev
+#
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
+#
+# Accelerations & FPGA
+#
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
+ fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o
+ en_accel/ipsec_stats.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
new file mode 100644
index 000000000000..c13260467750
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -0,0 +1,37 @@
+#ifndef __MLX5E_ACCEL_H__
+#define __MLX5E_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en.h"
+
+static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
+{
+ __be16 *ethtype;
+
+ if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
+ return false;
+ ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+ if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ return false;
+ return true;
+}
+
+static inline void remove_metadata_hdr(struct sk_buff *skb)
+{
+ struct ethhdr *old_eth;
+ struct ethhdr *new_eth;
+
+ /* Remove the metadata from the buffer */
+ old_eth = (struct ethhdr *)skb->data;
+ new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+ memmove(new_eth, old_eth, 2 * ETH_ALEN);
+ /* Ethertype is already in its new place */
+ skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+}
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_ACCEL_H__ */
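
remove_metadata_hdr() above strips the FPGA's inserted metadata by sliding the two MAC addresses forward over it, leaving the real ethertype where it already sits. A standalone model of that memmove, assuming the driver's 8-byte MLX5E_METADATA_ETHER_LEN; the metadata bytes below are placeholders:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define ETH_ALEN	6
	#define META_LEN	8	/* stands in for MLX5E_METADATA_ETHER_LEN */

	int main(void)
	{
		/* dst MAC, src MAC, 8-byte metadata hdr, real ethertype 0x0800 */
		uint8_t frame[] = {
			1, 1, 1, 1, 1, 1,  2, 2, 2, 2, 2, 2,
			0x89, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
			0x08, 0x00, 0x45, 0x00,
		};
		uint8_t *data = frame;

		/* slide the two MAC addresses forward over the metadata */
		memmove(data + META_LEN, data, 2 * ETH_ALEN);
		data += META_LEN;		/* skb_pull(skb, META_LEN) */

		printf("ethertype now at offset 12: %02x%02x\n",
		       data[12], data[13]);	/* 0800 */
		return 0;
	}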
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index 77ac19f38cbe..da7bd26368f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -37,17 +37,26 @@
#include "mlx5_core.h"
#include "fpga/tls.h"
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
- return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid);
+ return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, p_swid,
+ direction_sx);
}
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx)
{
- mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+ mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
+}
+
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index 6f9c9f446ecc..def4093ebfae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
u8 reserved_at_2[0x1e];
};
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -72,10 +76,14 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else
static inline int
-mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
-static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx) { return -ENOTSUPP; }
+static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx) { }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
+ u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 384c1fa49081..fe4ac40dbade 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -211,7 +211,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
ent->ret = 0;
return;
}
- usleep_range(5000, 10000);
+ cond_resched();
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -278,6 +278,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_PSV:
case MLX5_CMD_OP_DESTROY_SRQ:
case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ case MLX5_CMD_OP_DESTROY_XRQ:
case MLX5_CMD_OP_DESTROY_DCT:
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
@@ -310,6 +311,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP:
+ case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -346,6 +348,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_XRC_SRQ:
case MLX5_CMD_OP_QUERY_XRC_SRQ:
case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_CREATE_DCT:
case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_QUERY_DCT:
@@ -427,6 +432,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_FPGA_MODIFY_QP:
case MLX5_CMD_OP_FPGA_QUERY_QP:
case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -452,6 +458,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
MLX5_COMMAND_STR_CASE(QUERY_ISSI);
MLX5_COMMAND_STR_CASE(SET_ISSI);
+ MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
MLX5_COMMAND_STR_CASE(CREATE_MKEY);
MLX5_COMMAND_STR_CASE(QUERY_MKEY);
MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
@@ -599,6 +606,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
+ MLX5_COMMAND_STR_CASE(CREATE_XRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_XRQ);
+ MLX5_COMMAND_STR_CASE(ARM_XRQ);
+ MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
+ MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
default: return "unknown command opcode";
}
}
@@ -677,7 +690,7 @@ struct mlx5_ifc_mbox_out_bits {
struct mlx5_ifc_mbox_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -697,6 +710,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
u8 status;
u16 opcode;
u16 op_mod;
+ u16 uid;
mlx5_cmd_mbox_status(out, &status, &syndrome);
if (!status)
@@ -704,8 +718,15 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
+ uid = MLX5_GET(mbox_in, in, uid);
- mlx5_core_err(dev,
+ if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
+ mlx5_core_err_rl(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+ mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome);
+ else
+ mlx5_core_dbg(dev,
"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
mlx5_command_str(opcode),
opcode, op_mod,
@@ -1022,7 +1043,10 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf,
if (!dbg->in_msg || !dbg->out_msg)
return -ENOMEM;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ if (count < sizeof(lbuf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
return -EFAULT;
lbuf[sizeof(lbuf) - 1] = 0;
@@ -1226,21 +1250,12 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- int copy;
-
- if (*pos)
- return 0;
if (!dbg->out_msg)
return -ENOMEM;
- copy = min_t(int, count, dbg->outlen);
- if (copy_to_user(buf, dbg->out_msg, copy))
- return -EFAULT;
-
- *pos += copy;
-
- return copy;
+ return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
+ dbg->outlen);
}
static const struct file_operations dfops = {
@@ -1258,19 +1273,11 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
char outlen[8];
int err;
- if (*pos)
- return 0;
-
err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
if (err < 0)
return err;
- if (copy_to_user(buf, &outlen, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, outlen, err);
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 413080a312a7..90fabd612b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -150,22 +150,13 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
int ret;
char tbuf[22];
- if (*pos)
- return 0;
-
stats = filp->private_data;
spin_lock_irq(&stats->lock);
if (stats->n)
field = div64_u64(stats->sum, stats->n);
spin_unlock_irq(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
- if (ret > 0) {
- if (copy_to_user(buf, tbuf, ret))
- return -EFAULT;
- }
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static ssize_t average_write(struct file *filp, const char __user *buf,
@@ -442,9 +433,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
u64 field;
int ret;
- if (*pos)
- return 0;
-
desc = filp->private_data;
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
@@ -470,13 +458,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
else
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
- if (ret > 0) {
- if (copy_to_user(buf, tbuf, ret))
- return -EFAULT;
- }
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static const struct file_operations fops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 09f178a3fcab..0240aee9189e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -138,6 +138,8 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2, "VLAN_PUSH_2"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2, "VLAN_POP_2"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
new file mode 100644
index 000000000000..d4ec93bde4de
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -0,0 +1,947 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#define CREATE_TRACE_POINTS
+#include "fw_tracer.h"
+#include "fw_tracer_tracepoint.h"
+
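+/* Read MTRC_CAP: verify the device can log traces to memory, and cache
+ * the string DB layout (base address and size per section), the trace
+ * version and the current ownership state on the tracer object.
+ */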
+static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
+{
+ u32 *string_db_base_address_out = tracer->str_db.base_address_out;
+ u32 *string_db_size_out = tracer->str_db.size_out;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ void *mtrc_cap_sp;
+ int err, i;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n",
+ err);
+ return err;
+ }
+
+ if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
+ mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
+ return -ENOTSUPP;
+ }
+
+ tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
+ tracer->str_db.first_string_trace =
+ MLX5_GET(mtrc_cap, out, first_string_trace);
+ tracer->str_db.num_string_trace =
+ MLX5_GET(mtrc_cap, out, num_string_trace);
+ tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
+ tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+
+ for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
+ string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param,
+ mtrc_cap_sp,
+ string_db_base_address);
+ string_db_size_out[i] = MLX5_GET(mtrc_string_db_param,
+ mtrc_cap_sp, string_db_size);
+ }
+
+ return err;
+}
+
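+/* Write the trace_owner bit through MTRC_CAP; the caller inspects the
+ * register output to learn whether FW actually granted ownership.
+ */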
+static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer,
+ u32 *out, u32 out_size,
+ u8 trace_owner)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+
+ MLX5_SET(mtrc_cap, in, trace_owner, trace_owner);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, out_size,
+ MLX5_REG_MTRC_CAP, 0, 1);
+}
+
+static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ int err;
+
+ err = mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out),
+ MLX5_FW_TRACER_ACQUIRE_OWNERSHIP);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n",
+ err);
+ return err;
+ }
+
+ tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+
+ if (!tracer->owner)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
+{
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+
+ mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out),
+ MLX5_FW_TRACER_RELEASE_OWNERSHIP);
+ tracer->owner = false;
+}
+
+static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct device *ddev = &dev->pdev->dev;
+ dma_addr_t dma;
+ void *buff;
+ gfp_t gfp;
+ int err;
+
+ tracer->buff.size = TRACE_BUFFER_SIZE_BYTE;
+
+ gfp = GFP_KERNEL | __GFP_ZERO;
+ buff = (void *)__get_free_pages(gfp,
+ get_order(tracer->buff.size));
+ if (!buff) {
+ err = -ENOMEM;
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err);
+ return err;
+ }
+ tracer->buff.log_buf = buff;
+
+ dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
+ dma_mapping_error(ddev, dma));
+ err = -ENOMEM;
+ goto free_pages;
+ }
+ tracer->buff.dma = dma;
+
+ return 0;
+
+free_pages:
+ free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
+
+ return err;
+}
+
+static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct device *ddev = &dev->pdev->dev;
+
+ if (!tracer->buff.log_buf)
+ return;
+
+ dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
+ free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
+}
+
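+/* Build an MTT-based mkey over the DMA-mapped log buffer so the device
+ * can scatter trace blocks into it; MTT entries are packed two per
+ * octword, hence the DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2) sizing.
+ */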
+static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ int err, inlen, i;
+ __be64 *mtt;
+ void *mkc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < TRACER_BUFFER_PAGE_NUM; i++)
+ mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, pd, tracer->buff.pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
+ MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma);
+ MLX5_SET64(mkc, mkc, len, tracer->buff.size);
+ err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ u32 num_string_db = tracer->str_db.num_string_db;
+ int i;
+
+ for (i = 0; i < num_string_db; i++) {
+ kfree(tracer->str_db.buffer[i]);
+ tracer->str_db.buffer[i] = NULL;
+ }
+}
+
+static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ u32 *string_db_size_out = tracer->str_db.size_out;
+ u32 num_string_db = tracer->str_db.num_string_db;
+ int i;
+
+ for (i = 0; i < num_string_db; i++) {
+ tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
+ if (!tracer->str_db.buffer[i])
+ goto free_strings_db;
+ }
+
+ return 0;
+
+free_strings_db:
+ mlx5_fw_tracer_free_strings_db(tracer);
+ return -ENOMEM;
+}
+
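+/* Work handler: pull the FW strings database section by section over the
+ * MTRC_STDB access register, 256 bytes at a time plus 64-byte reads for
+ * the leftovers, then mark the DB as loaded.
+ */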
+static void mlx5_tracer_read_strings_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
+ read_fw_strings_work);
+ u32 num_of_reads, num_string_db = tracer->str_db.num_string_db;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ u32 leftovers, offset;
+ int err = 0, i, j;
+ u32 *out, outlen;
+ void *out_value;
+
+ outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES;
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_string_db; i++) {
+ offset = 0;
+ MLX5_SET(mtrc_stdb, in, string_db_index, i);
+ num_of_reads = tracer->str_db.size_out[i] /
+ STRINGS_DB_READ_SIZE_BYTES;
+ leftovers = (tracer->str_db.size_out[i] %
+ STRINGS_DB_READ_SIZE_BYTES) /
+ STRINGS_DB_LEFTOVER_SIZE_BYTES;
+
+ MLX5_SET(mtrc_stdb, in, read_size, STRINGS_DB_READ_SIZE_BYTES);
+ for (j = 0; j < num_of_reads; j++) {
+ MLX5_SET(mtrc_stdb, in, start_offset, offset);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ outlen, MLX5_REG_MTRC_STDB,
+ 0, 1);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
+ err);
+ goto out_free;
+ }
+
+ out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
+ memcpy(tracer->str_db.buffer[i] + offset, out_value,
+ STRINGS_DB_READ_SIZE_BYTES);
+ offset += STRINGS_DB_READ_SIZE_BYTES;
+ }
+
+ /* The strings database is aligned to 64 bytes; read the leftovers */
+ MLX5_SET(mtrc_stdb, in, read_size,
+ STRINGS_DB_LEFTOVER_SIZE_BYTES);
+ for (j = 0; j < leftovers; j++) {
+ MLX5_SET(mtrc_stdb, in, start_offset, offset);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ outlen, MLX5_REG_MTRC_STDB,
+ 0, 1);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
+ err);
+ goto out_free;
+ }
+
+ out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
+ memcpy(tracer->str_db.buffer[i] + offset, out_value,
+ STRINGS_DB_LEFTOVER_SIZE_BYTES);
+ offset += STRINGS_DB_LEFTOVER_SIZE_BYTES;
+ }
+ }
+
+ tracer->str_db.loaded = true;
+
+out_free:
+ kfree(out);
+out:
+ return;
+}
+
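+/* Re-arm the tracer event in MTRC_CTRL so FW raises a new EQE once more
+ * traces are written to memory.
+ */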
+static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_ctrl, in, arm_event, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CTRL, 0, 1);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err);
+}
+
+static const char *VAL_PARM = "%llx";
+static const char *REPLACE_64_VAL_PARM = "%x%x";
+static const char *PARAM_CHAR = "%";
+
+static int mlx5_tracer_message_hash(u32 message_id)
+{
+ return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1);
+}
+
+static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct hlist_head *head =
+ &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
+ struct tracer_string_format *cur_string;
+
+ cur_string = kzalloc(sizeof(*cur_string), GFP_KERNEL);
+ if (!cur_string)
+ return NULL;
+
+ hlist_add_head(&cur_string->hlist, head);
+
+ return cur_string;
+}
+
+static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+ u32 str_ptr, offset;
+ int i;
+
+ str_ptr = tracer_event->string_event.string_param;
+
+ for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (str_ptr > tracer->str_db.base_address_out[i] &&
+ str_ptr < tracer->str_db.base_address_out[i] +
+ tracer->str_db.size_out[i]) {
+ offset = str_ptr - tracer->str_db.base_address_out[i];
+ /* add it to the hash */
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return NULL;
+ cur_string->string = (char *)(tracer->str_db.buffer[i] +
+ offset);
+ return cur_string;
+ }
+ }
+
+ return NULL;
+}
+
+static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt)
+{
+ hlist_del(&str_frmt->hlist);
+ kfree(str_frmt);
+}
+
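+/* Rewrite every "%llx" in the format string to "%x%x" in place (64-bit
+ * values arrive as two 32-bit trace parameters), then count the '%'
+ * characters to learn how many parameters this message expects.
+ */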
+static int mlx5_tracer_get_num_of_params(char *str)
+{
+ char *substr, *pstr = str;
+ int num_of_params = 0;
+
+ /* replace %llx with %x%x */
+ substr = strstr(pstr, VAL_PARM);
+ while (substr) {
+ memcpy(substr, REPLACE_64_VAL_PARM, 4);
+ pstr = substr;
+ substr = strstr(pstr, VAL_PARM);
+ }
+
+ /* count all the % characters */
+ substr = strstr(str, PARAM_CHAR);
+ while (substr) {
+ num_of_params += 1;
+ str = substr + 1;
+ substr = strstr(str, PARAM_CHAR);
+ }
+
+ return num_of_params;
+}
+
+static struct tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head,
+ u8 event_id, u32 tmsn)
+{
+ struct tracer_string_format *message;
+
+ hlist_for_each_entry(message, head, hlist)
+ if (message->event_id == event_id && message->tmsn == tmsn)
+ return message;
+
+ return NULL;
+}
+
+static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct hlist_head *head =
+ &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
+
+ return mlx5_tracer_message_find(head, tracer_event->event_id,
+ tracer_event->string_event.tmsn);
+}
+
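+/* Decode one 64-bit trace entry. Event ID 0xFF is a timestamp event whose
+ * 53-bit timestamp is reassembled from three fields; IDs inside the string
+ * trace window are string events; anything else is unrecognized.
+ */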
+static void poll_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event, u64 *trace)
+{
+ u32 timestamp_low, timestamp_mid, timestamp_high, urts;
+
+ tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
+ tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+
+ switch (tracer_event->event_id) {
+ case TRACER_EVENT_TYPE_TIMESTAMP:
+ tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP;
+ urts = MLX5_GET(tracer_timestamp_event, trace, urts);
+ if (tracer->trc_ver == 0)
+ tracer_event->timestamp_event.unreliable = !!(urts >> 2);
+ else
+ tracer_event->timestamp_event.unreliable = !!(urts & 1);
+
+ timestamp_low = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp7_0);
+ timestamp_mid = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp39_8);
+ timestamp_high = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp52_40);
+
+ tracer_event->timestamp_event.timestamp =
+ ((u64)timestamp_high << 40) |
+ ((u64)timestamp_mid << 8) |
+ (u64)timestamp_low;
+ break;
+ default:
+ if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
+ tracer_event->event_id <= tracer->str_db.first_string_trace +
+ tracer->str_db.num_string_trace) {
+ tracer_event->type = TRACER_EVENT_TYPE_STRING;
+ tracer_event->string_event.timestamp =
+ MLX5_GET(tracer_string_event, trace, timestamp);
+ tracer_event->string_event.string_param =
+ MLX5_GET(tracer_string_event, trace, string_param);
+ tracer_event->string_event.tmsn =
+ MLX5_GET(tracer_string_event, trace, tmsn);
+ tracer_event->string_event.tdsn =
+ MLX5_GET(tracer_string_event, trace, tdsn);
+ } else {
+ tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED;
+ }
+ break;
+ }
+}
+
+static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event)
+{
+ struct tracer_event tracer_event;
+ u8 event_id;
+
+ event_id = MLX5_GET(tracer_event, ts_event, event_id);
+
+ if (event_id == TRACER_EVENT_TYPE_TIMESTAMP)
+ poll_trace(tracer, &tracer_event, ts_event);
+ else
+ tracer_event.timestamp_event.timestamp = 0;
+
+ return tracer_event.timestamp_event.timestamp;
+}
+
+static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer)
+{
+ struct tracer_string_format *str_frmt;
+ struct hlist_node *n;
+ int i;
+
+ for (i = 0; i < MESSAGE_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(str_frmt, n, &tracer->hash[i], hlist)
+ mlx5_tracer_clean_message(str_frmt);
+ }
+}
+
+static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer)
+{
+ struct tracer_string_format *str_frmt, *tmp_str;
+
+ list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list,
+ list)
+ list_del(&str_frmt->list);
+}
+
+static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
+ struct mlx5_core_dev *dev,
+ u64 trace_timestamp)
+{
+ char tmp[512];
+
+ snprintf(tmp, sizeof(tmp), str_frmt->string,
+ str_frmt->params[0],
+ str_frmt->params[1],
+ str_frmt->params[2],
+ str_frmt->params[3],
+ str_frmt->params[4],
+ str_frmt->params[5],
+ str_frmt->params[6]);
+
+ trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost,
+ str_frmt->event_id, tmp);
+
+ /* remove it from hash */
+ mlx5_tracer_clean_message(str_frmt);
+}
+
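+/* A tdsn of zero starts a new message: resolve the format string and hash
+ * it by tmsn. Later events with the same tmsn append one parameter each;
+ * once all expected parameters have arrived, the message joins the ready
+ * list for printing.
+ */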
+static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
+ if (tracer_event->string_event.tdsn == 0) {
+ cur_string = mlx5_tracer_get_string(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
+ cur_string->last_param_num = 0;
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->tmsn = tracer_event->string_event.tmsn;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ if (cur_string->num_of_params == 0) /* trace with no params */
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ } else {
+ cur_string = mlx5_tracer_message_get(tracer, tracer_event);
+ if (!cur_string) {
+ pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ __func__, tracer_event->string_event.tmsn);
+ return -1;
+ }
+ cur_string->last_param_num += 1;
+ if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
+ pr_debug("%s Number of params exceeds the max (%d)\n",
+ __func__, TRACER_MAX_PARAMS);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+ }
+ /* keep the new parameter */
+ cur_string->params[cur_string->last_param_num - 1] =
+ tracer_event->string_event.string_param;
+ if (cur_string->last_param_num == cur_string->num_of_params)
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ }
+
+ return 0;
+}
+
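+/* A reliable timestamp event flushes the ready list: each message carries
+ * only the low 7 timestamp bits, so merge them with the event's upper bits,
+ * stepping the upper bits back by one when the message's low bits show it
+ * predates the current window.
+ */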
+static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_timestamp_event timestamp_event =
+ tracer_event->timestamp_event;
+ struct tracer_string_format *str_frmt, *tmp_str;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u64 trace_timestamp;
+
+ list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) {
+ list_del(&str_frmt->list);
+ if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0))
+ trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
+ (str_frmt->timestamp & MASK_6_0);
+ else
+ trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ (str_frmt->timestamp & MASK_6_0);
+
+ mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
+ }
+}
+
+static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ if (tracer_event->type == TRACER_EVENT_TYPE_STRING) {
+ mlx5_tracer_handle_string_trace(tracer, tracer_event);
+ } else if (tracer_event->type == TRACER_EVENT_TYPE_TIMESTAMP) {
+ if (!tracer_event->timestamp_event.unreliable)
+ mlx5_tracer_handle_timestamp_trace(tracer, tracer_event);
+ } else {
+ pr_debug("%s Got unrecognised type %d for parsing, exiting..\n",
+ __func__, tracer_event->type);
+ }
+ return 0;
+}
+
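+/* Work handler for the traces-available event: walk the log buffer block
+ * by block from the consumer index, copying each block out of the DMA
+ * buffer before parsing it, until the block timestamps stop advancing.
+ */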
+static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, handle_traces_work);
+ u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK];
+ u32 block_count, start_offset, prev_start_offset, prev_consumer_index;
+ u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event);
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct tracer_event tracer_event;
+ int i;
+
+ mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner);
+ if (!tracer->owner)
+ return;
+
+ block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+ /* Copy the block to a local buffer to avoid HW override while it is being processed */
+ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
+ TRACER_BLOCK_SIZE_BYTE);
+
+ block_timestamp =
+ get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+
+ while (block_timestamp > tracer->last_timestamp) {
+ /* Check block override if it's not the first block */
+ if (tracer->last_timestamp) {
+ u64 *ts_event;
+ /* To avoid block override by the HW in case of buffer
+ * wraparound, the timestamp of the previous block
+ * should be compared to the last timestamp handled
+ * by the driver.
+ */
+ prev_consumer_index =
+ (tracer->buff.consumer_index - 1) & (block_count - 1);
+ prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+ ts_event = tracer->buff.log_buf + prev_start_offset +
+ (TRACES_PER_BLOCK - 1) * trace_event_size;
+ last_block_timestamp = get_block_timestamp(tracer, ts_event);
+ /* If previous timestamp different from last stored
+ * timestamp then there is a good chance that the
+ * current buffer is overwritten and therefore should
+ * not be parsed.
+ */
+ if (tracer->last_timestamp != last_block_timestamp) {
+ mlx5_core_warn(dev, "FWTracer: Events were lost\n");
+ tracer->last_timestamp = block_timestamp;
+ tracer->buff.consumer_index =
+ (tracer->buff.consumer_index + 1) & (block_count - 1);
+ break;
+ }
+ }
+
+ /* Parse events */
+ for (i = 0; i < TRACES_PER_BLOCK; i++) {
+ poll_trace(tracer, &tracer_event, &tmp_trace_block[i]);
+ mlx5_tracer_handle_trace(tracer, &tracer_event);
+ }
+
+ tracer->buff.consumer_index =
+ (tracer->buff.consumer_index + 1) & (block_count - 1);
+
+ tracer->last_timestamp = block_timestamp;
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
+ TRACER_BLOCK_SIZE_BYTE);
+ block_timestamp = get_block_timestamp(tracer,
+ &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+ }
+
+ mlx5_fw_tracer_arm(dev);
+}
+
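+/* Point FW at the log buffer: select trace-to-memory mode and hand over
+ * the buffer size and mkey through MTRC_CONF.
+ */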
+static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
+ MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
+ ilog2(TRACER_BUFFER_PAGE_NUM));
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CONF, 0, 1);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+
+ return err;
+}
+
+static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_ctrl, in, modify_field_select, TRACE_STATUS);
+ MLX5_SET(mtrc_ctrl, in, trace_status, status);
+ MLX5_SET(mtrc_ctrl, in, arm_event, arm);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CTRL, 0, 1);
+
+ if (!err && status)
+ tracer->last_timestamp = 0;
+
+ return err;
+}
+
+static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ int err;
+
+ err = mlx5_fw_tracer_ownership_acquire(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err);
+ /* Don't fail since ownership can be acquired on a later FW event */
+ return 0;
+ }
+
+ err = mlx5_fw_tracer_set_mtrc_conf(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", err);
+ goto release_ownership;
+ }
+
+ /* enable tracer & trace events */
+ err = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", err);
+ goto release_ownership;
+ }
+
+ mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n");
+ return 0;
+
+release_ownership:
+ mlx5_fw_tracer_ownership_release(tracer);
+ return err;
+}
+
+static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, ownership_change_work);
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
+ if (tracer->owner) {
+ tracer->owner = false;
+ tracer->buff.consumer_index = 0;
+ return;
+ }
+
+ mlx5_fw_tracer_start(tracer);
+}
+
+/* Create software resources (buffers, etc.) */
+struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_tracer *tracer = NULL;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) {
+ mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n");
+ return NULL;
+ }
+
+ tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+ if (!tracer)
+ return ERR_PTR(-ENOMEM);
+
+ tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer");
+ if (!tracer->work_queue) {
+ err = -ENOMEM;
+ goto free_tracer;
+ }
+
+ tracer->dev = dev;
+
+ INIT_LIST_HEAD(&tracer->ready_strings_list);
+ INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
+ INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
+ INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+
+ err = mlx5_query_mtrc_caps(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
+ goto destroy_workqueue;
+ }
+
+ err = mlx5_fw_tracer_create_log_buf(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err);
+ goto destroy_workqueue;
+ }
+
+ err = mlx5_fw_tracer_allocate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err);
+ goto free_log_buf;
+ }
+
+ mlx5_core_dbg(dev, "FWTracer: Tracer created\n");
+
+ return tracer;
+
+free_log_buf:
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+destroy_workqueue:
+ tracer->dev = NULL;
+ destroy_workqueue(tracer->work_queue);
+free_tracer:
+ kfree(tracer);
+ return ERR_PTR(err);
+}
+
+/* Create HW resources and start the tracer.
+ * Must be called before the async EQ is created.
+ */
+int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ if (IS_ERR_OR_NULL(tracer))
+ return 0;
+
+ dev = tracer->dev;
+
+ if (!tracer->str_db.loaded)
+ queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+
+ err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
+ return err;
+ }
+
+ err = mlx5_fw_tracer_create_mkey(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err);
+ goto err_dealloc_pd;
+ }
+
+ mlx5_fw_tracer_start(tracer);
+
+ return 0;
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+ return err;
+}
+
+/* Stop the tracer and clean up HW resources.
+ * Must be called after the async EQ is destroyed.
+ */
+void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
+{
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
+ tracer->owner);
+
+ cancel_work_sync(&tracer->ownership_change_work);
+ cancel_work_sync(&tracer->handle_traces_work);
+
+ if (tracer->owner)
+ mlx5_fw_tracer_ownership_release(tracer);
+
+ mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+}
+
+/* Free software resources (buffers, etc.) */
+void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
+{
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n");
+
+ cancel_work_sync(&tracer->read_fw_strings_work);
+ mlx5_fw_tracer_clean_ready_list(tracer);
+ mlx5_fw_tracer_clean_print_hash(tracer);
+ mlx5_fw_tracer_free_strings_db(tracer);
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+ flush_workqueue(tracer->work_queue);
+ destroy_workqueue(tracer->work_queue);
+ kfree(tracer);
+}
+
+void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5_fw_tracer *tracer = dev->tracer;
+
+ if (!tracer)
+ return;
+
+ switch (eqe->sub_type) {
+ case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
+ queue_work(tracer->work_queue, &tracer->ownership_change_work);
+ break;
+ case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
+ if (likely(tracer->str_db.loaded))
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ break;
+ default:
+ mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
+ eqe->sub_type);
+ }
+}
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
new file mode 100644
index 000000000000..0347f2dd5cee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_TRACER_H__
+#define __LIB_TRACER_H__
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+#define STRINGS_DB_SECTIONS_NUM 8
+#define STRINGS_DB_READ_SIZE_BYTES 256
+#define STRINGS_DB_LEFTOVER_SIZE_BYTES 64
+#define TRACER_BUFFER_PAGE_NUM 64
+#define TRACER_BUFFER_CHUNK 4096
+#define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK)
+
+#define TRACER_BLOCK_SIZE_BYTE 256
+#define TRACES_PER_BLOCK 32
+
+#define TRACER_MAX_PARAMS 7
+#define MESSAGE_HASH_BITS 6
+#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS)
+
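+/* A trace timestamp is 53 bits wide; MASK_52_7 selects the block-level
+ * bits and MASK_6_0 the low bits carried inside each string event.
+ */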
+#define MASK_52_7 (0x1FFFFFFFFFFF80)
+#define MASK_6_0 (0x7F)
+
+struct mlx5_fw_tracer {
+ struct mlx5_core_dev *dev;
+ bool owner;
+ u8 trc_ver;
+ struct workqueue_struct *work_queue;
+ struct work_struct ownership_change_work;
+ struct work_struct read_fw_strings_work;
+
+ /* Strings DB */
+ struct {
+ u8 first_string_trace;
+ u8 num_string_trace;
+ u32 num_string_db;
+ u32 base_address_out[STRINGS_DB_SECTIONS_NUM];
+ u32 size_out[STRINGS_DB_SECTIONS_NUM];
+ void *buffer[STRINGS_DB_SECTIONS_NUM];
+ bool loaded;
+ } str_db;
+
+ /* Log Buffer */
+ struct {
+ u32 pdn;
+ void *log_buf;
+ dma_addr_t dma;
+ u32 size;
+ struct mlx5_core_mkey mkey;
+ u32 consumer_index;
+ } buff;
+
+ u64 last_timestamp;
+ struct work_struct handle_traces_work;
+ struct hlist_head hash[MESSAGE_HASH_SIZE];
+ struct list_head ready_strings_list;
+};
+
+struct tracer_string_format {
+ char *string;
+ int params[TRACER_MAX_PARAMS];
+ int num_of_params;
+ int last_param_num;
+ u8 event_id;
+ u32 tmsn;
+ struct hlist_node hlist;
+ struct list_head list;
+ u32 timestamp;
+ bool lost;
+};
+
+enum mlx5_fw_tracer_ownership_state {
+ MLX5_FW_TRACER_RELEASE_OWNERSHIP,
+ MLX5_FW_TRACER_ACQUIRE_OWNERSHIP,
+};
+
+enum tracer_ctrl_fields_select {
+ TRACE_STATUS = 1 << 0,
+};
+
+enum tracer_event_type {
+ TRACER_EVENT_TYPE_STRING,
+ TRACER_EVENT_TYPE_TIMESTAMP = 0xFF,
+ TRACER_EVENT_TYPE_UNRECOGNIZED,
+};
+
+enum tracing_mode {
+ TRACE_TO_MEMORY = 1 << 0,
+};
+
+struct tracer_timestamp_event {
+ u64 timestamp;
+ u8 unreliable;
+};
+
+struct tracer_string_event {
+ u32 timestamp;
+ u32 tmsn;
+ u32 tdsn;
+ u32 string_param;
+};
+
+struct tracer_event {
+ bool lost_event;
+ u32 type;
+ u8 event_id;
+ union {
+ struct tracer_string_event string_event;
+ struct tracer_timestamp_event timestamp_event;
+ };
+};
+
+struct mlx5_ifc_tracer_event_bits {
+ u8 lost[0x1];
+ u8 timestamp[0x7];
+ u8 event_id[0x8];
+ u8 event_data[0x30];
+};
+
+struct mlx5_ifc_tracer_string_event_bits {
+ u8 lost[0x1];
+ u8 timestamp[0x7];
+ u8 event_id[0x8];
+ u8 tmsn[0xd];
+ u8 tdsn[0x3];
+ u8 string_param[0x20];
+};
+
+struct mlx5_ifc_tracer_timestamp_event_bits {
+ u8 timestamp7_0[0x8];
+ u8 event_id[0x8];
+ u8 urts[0x3];
+ u8 timestamp52_40[0xd];
+ u8 timestamp39_8[0x20];
+};
+
+struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
+int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
new file mode 100644
index 000000000000..83f90e9aff45
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __LIB_TRACER_TRACEPOINT_H__
+
+#include <linux/tracepoint.h>
+#include "fw_tracer.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+/* Tracepoint for FWTracer messages: */
+TRACE_EVENT(mlx5_fw,
+ TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp,
+ bool lost, u8 event_id, const char *msg),
+
+ TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(&tracer->dev->pdev->dev))
+ __field(u64, trace_timestamp)
+ __field(bool, lost)
+ __field(u8, event_id)
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+ __entry->trace_timestamp = trace_timestamp;
+ __entry->lost = lost;
+ __entry->event_id = event_id;
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("%s [0x%llx] %d [0x%x] %s",
+ __get_str(dev_name),
+ __entry->trace_timestamp,
+ __entry->lost, __entry->event_id,
+ __get_str(msg))
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ./diag
+#define TRACE_INCLUDE_FILE fw_tracer_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 405236cf0b04..db2cfcd21d43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,6 +52,7 @@
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
+#include "en/fs.h"
struct page_pool;
@@ -137,7 +138,6 @@ struct page_pool;
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
-#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
@@ -148,10 +148,6 @@ struct page_pool;
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
- ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-
#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -349,6 +345,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
@@ -369,16 +366,14 @@ struct mlx5e_txqsq {
struct mlx5e_cq cq;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
- } db;
-
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
struct mlx5e_sq_stats *stats;
+ struct {
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } db;
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
@@ -400,30 +395,43 @@ struct mlx5e_txqsq {
} recover;
} ____cacheline_aligned_in_smp;
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
+struct mlx5e_xdp_info {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ struct mlx5e_dma_info di;
+};
+
struct mlx5e_xdpsq {
/* data path */
- /* dirtied @rx completion */
+ /* dirtied @completion */
u16 cc;
- u16 pc;
+ bool redirect_flush;
- struct mlx5e_cq cq;
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ bool doorbell;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_dma_info *di;
- bool doorbell;
- bool redirect_flush;
- } db;
+ struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
+ struct mlx5e_xdpsq_stats *stats;
+ struct {
+ struct mlx5e_xdp_info *xdpi;
+ } db;
void __iomem *uar_map;
u32 sqn;
struct device *pdev;
__be32 mkey_be;
u8 min_inline_mode;
unsigned long state;
+ unsigned int hw_mtu;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -460,11 +468,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-struct mlx5e_dma_info {
- struct page *page;
- dma_addr_t addr;
-};
-
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
u32 offset;
@@ -567,7 +570,6 @@ struct mlx5e_rq {
/* XDP */
struct bpf_prog *xdp_prog;
- unsigned int hw_mtu;
struct mlx5e_xdpsq xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
@@ -596,6 +598,9 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
+ /* XDP_REDIRECT */
+ struct mlx5e_xdpsq xdpsq;
+
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
@@ -618,159 +623,16 @@ struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
+ struct mlx5e_xdpsq_stats rq_xdpsq;
+ struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;
-enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP,
- MLX5E_TT_IPV6_TCP,
- MLX5E_TT_IPV4_UDP,
- MLX5E_TT_IPV6_UDP,
- MLX5E_TT_IPV4_IPSEC_AH,
- MLX5E_TT_IPV6_IPSEC_AH,
- MLX5E_TT_IPV4_IPSEC_ESP,
- MLX5E_TT_IPV6_IPSEC_ESP,
- MLX5E_TT_IPV4,
- MLX5E_TT_IPV6,
- MLX5E_TT_ANY,
- MLX5E_NUM_TT,
- MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
-};
-
-enum mlx5e_tunnel_types {
- MLX5E_TT_IPV4_GRE,
- MLX5E_TT_IPV6_GRE,
- MLX5E_NUM_TUNNEL_TT,
-};
-
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
-};
-
-struct mlx5e_l2_rule {
- u8 addr[ETH_ALEN + 2];
- struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_flow_table {
- int num_groups;
- struct mlx5_flow_table *t;
- struct mlx5_flow_group **g;
-};
-
-#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct mlx5e_tc_table {
- struct mlx5_flow_table *t;
-
- struct rhashtable ht;
-
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-};
-
-struct mlx5e_vlan_table {
- struct mlx5e_flow_table ft;
- DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
- DECLARE_BITMAP(active_svlans, VLAN_N_VID);
- struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_cvlan_rule;
- struct mlx5_flow_handle *any_svlan_rule;
- bool cvlan_filter_disabled;
-};
-
-struct mlx5e_l2_table {
- struct mlx5e_flow_table ft;
- struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
- struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
- struct mlx5e_l2_rule broadcast;
- struct mlx5e_l2_rule allmulti;
- struct mlx5e_l2_rule promisc;
- bool broadcast_enabled;
- bool allmulti_enabled;
- bool promisc_enabled;
-};
-
-/* L3/L4 traffic type classifier */
-struct mlx5e_ttc_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
- struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
-};
-
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-struct arfs_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *default_rule;
- struct hlist_head rules_hash[ARFS_HASH_SIZE];
-};
-
-enum arfs_type {
- ARFS_IPV4_TCP,
- ARFS_IPV6_TCP,
- ARFS_IPV4_UDP,
- ARFS_IPV6_UDP,
- ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
- struct arfs_table arfs_tables[ARFS_NUM_TYPES];
- /* Protect aRFS rules list */
- spinlock_t arfs_lock;
- struct list_head rules;
- int last_filter_id;
- struct workqueue_struct *wq;
-};
-
-/* NIC prio FTS */
-enum {
- MLX5E_VLAN_FT_LEVEL = 0,
- MLX5E_L2_FT_LEVEL,
- MLX5E_TTC_FT_LEVEL,
- MLX5E_INNER_TTC_FT_LEVEL,
- MLX5E_ARFS_FT_LEVEL
-};
-
-enum {
- MLX5E_TC_FT_LEVEL = 0,
- MLX5E_TC_TTC_FT_LEVEL,
-};
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-struct mlx5e_flow_steering {
- struct mlx5_flow_namespace *ns;
- struct mlx5e_ethtool_steering ethtool;
- struct mlx5e_tc_table tc;
- struct mlx5e_vlan_table vlan;
- struct mlx5e_l2_table l2;
- struct mlx5e_ttc_table ttc;
- struct mlx5e_ttc_table inner_ttc;
- struct mlx5e_arfs_tables arfs;
-};
-
struct mlx5e_rqt {
u32 rqtn;
bool enabled;
@@ -810,7 +672,6 @@ struct mlx5e_priv {
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
- struct mlx5e_vxlan_db vxlan;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -864,7 +725,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);
@@ -874,14 +736,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -890,7 +751,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
@@ -906,23 +766,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- int location);
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs);
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
- int location);
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
@@ -933,8 +780,6 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
@@ -1051,32 +896,6 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
-#ifndef CONFIG_RFS_ACCEL
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
-{
- return -EOPNOTSUPP;
-}
-#else
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
-int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-#endif
-
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
@@ -1097,27 +916,6 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-struct ttc_params {
- struct mlx5_flow_table_attr ft_attr;
- u32 any_tt_tirn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_ttc_table *inner_ttc;
-};
-
-void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
-void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
new file mode 100644
index 000000000000..bbf69e859b78
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5E_FLOW_STEER_H__
+#define __MLX5E_FLOW_STEER_H__
+
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
+struct mlx5e_tc_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable ht;
+
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+};
+
+struct mlx5e_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+};
+
+struct mlx5e_l2_rule {
+ u8 addr[ETH_ALEN + 2];
+ struct mlx5_flow_handle *rule;
+};
+
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_rule;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ bool cvlan_filter_disabled;
+};
+
+struct mlx5e_l2_table {
+ struct mlx5e_flow_table ft;
+ struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct mlx5e_l2_rule broadcast;
+ struct mlx5e_l2_rule allmulti;
+ struct mlx5e_l2_rule promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
+ MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
+};
+
+enum mlx5e_tunnel_types {
+ MLX5E_TT_IPV4_GRE,
+ MLX5E_TT_IPV6_GRE,
+ MLX5E_NUM_TUNNEL_TT,
+};
+
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
+ struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
+};
+
+/* NIC prio FTS */
+enum {
+ MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_L2_FT_LEVEL,
+ MLX5E_TTC_FT_LEVEL,
+ MLX5E_INNER_TTC_FT_LEVEL,
+#ifdef CONFIG_MLX5_EN_ARFS
+ MLX5E_ARFS_FT_LEVEL
+#endif
+};
+
+#ifdef CONFIG_MLX5_EN_RXNFC
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_RXNFC */
+
+#ifdef CONFIG_MLX5_EN_ARFS
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
+
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
+};
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#else
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+#endif
+
+struct mlx5e_flow_steering {
+ struct mlx5_flow_namespace *ns;
+#ifdef CONFIG_MLX5_EN_RXNFC
+ struct mlx5e_ethtool_steering ethtool;
+#endif
+ struct mlx5e_tc_table tc;
+ struct mlx5e_vlan_table vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5e_ttc_table ttc;
+ struct mlx5e_ttc_table inner_ttc;
+#ifdef CONFIG_MLX5_EN_ARFS
+ struct mlx5e_arfs_tables arfs;
+#endif
+};
+
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+
+#endif /* __MLX5E_FLOW_STEER_H__ */
+
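The netdev_uc/netdev_mc tables above are plain hlist buckets sized MLX5E_L2_ADDR_HASH_SIZE = BIT(BITS_PER_BYTE) = 256, which suggests direct indexing by one address byte. Below is a minimal sketch of how an L2 address could be bucketed under that assumption; the node type and both helpers are illustrative, not part of this patch:

	#include <linux/slab.h>
	#include <linux/etherdevice.h>

	struct l2_hash_node {			/* illustrative node type */
		struct hlist_node hlist;
		u8 addr[ETH_ALEN];
	};

	/* Hypothetical bucket function: with 256 buckets, the last MAC
	 * byte can index a bucket directly.
	 */
	static int l2_addr_hash(const u8 *addr)
	{
		return addr[ETH_ALEN - 1];
	}

	static void l2_table_add(struct hlist_head *hash, const u8 *addr)
	{
		struct l2_hash_node *hn = kzalloc(sizeof(*hn), GFP_ATOMIC);

		if (!hn)
			return;
		ether_addr_copy(hn->addr, addr);
		hlist_add_head(&hn->hlist, &hash[l2_addr_hash(addr)]);
	}
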
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
new file mode 100644
index 000000000000..ad6d471d00dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf_trace.h>
+#include "en/xdp.h"
+
+static inline bool
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
+ struct xdp_buff *xdp)
+{
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpi.xdpf))
+ return false;
+ xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
+ dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ xdpi.di = *di;
+
+ return mlx5e_xmit_xdp_frame(sq, &xdpi);
+}
+
+/* Returns true if the packet was consumed by XDP */
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
+{
+ struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct xdp_buff xdp;
+ u32 act;
+ int err;
+
+ if (!prog)
+ return false;
+
+ xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + *len;
+ xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ *rx_headroom = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+ return false;
+ case XDP_TX:
+ if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+ return true;
+ case XDP_REDIRECT:
+ /* When XDP is enabled, the page refcount is 1 here */
+ err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ if (unlikely(err))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+ rq->xdpsq.redirect_flush = true;
+ mlx5e_page_dma_unmap(rq, di);
+ rq->stats->xdp_redirect++;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+xdp_abort:
+ trace_xdp_exception(rq->netdev, prog, act);
+ /* fall through */
+ case XDP_DROP:
+ rq->stats->xdp_drop++;
+ return true;
+ }
+}
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg = wqe->data;
+
+ struct xdp_frame *xdpf = xdpi->xdpf;
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ unsigned int dma_len = xdpf->len;
+
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+ prefetchw(wqe);
+
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
+ stats->err++;
+ return false;
+ }
+
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+ if (sq->doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->doorbell = false;
+ }
+ stats->full++;
+ return false;
+ }
+
+ cseg->fm_ce_se = 0;
+
+ /* copy the inline part if required */
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ dma_len -= MLX5E_XDP_MIN_INLINE;
+ dma_addr += MLX5E_XDP_MIN_INLINE;
+ dseg++;
+ }
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+
+ /* Move the page reference to the SQ's responsibility,
+ * and mark it so it is not put back in the page cache.
+ */
+ sq->db.xdpi[pi] = *xdpi;
+ sq->pc++;
+
+ sq->doorbell = true;
+
+ stats->xmit++;
+ return true;
+}
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_xdpsq *sq;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return false;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ i = 0;
+ do {
+ u16 wqe_counter;
+ bool last_wqe;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ last_wqe = (sqcc == wqe_counter);
+ sqcc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, true);
+ }
+ } while (!last_wqe);
+ } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+ sq->stats->cqes += i;
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+
+ while (sq->cc != sq->pc) {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ sq->cc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, false);
+ }
+ }
+}
+
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_xdpsq *sq;
+ int drops = 0;
+ int sq_num;
+ int i;
+
+ if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ sq_num = smp_processor_id();
+
+ if (unlikely(sq_num >= priv->channels.num))
+ return -ENXIO;
+
+ sq = &priv->channels.c[sq_num]->xdpsq;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ xdpi.xdpf = xdpf;
+
+ if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ mlx5e_xmit_xdp_doorbell(sq);
+
+ return n - drops;
+}
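mlx5e_xdp_xmit() leaves the doorbell to the caller's XDP_XMIT_FLUSH flag, so a redirect burst costs one doorbell instead of one per frame. A minimal caller sketch, assuming this function is wired up as .ndo_xdp_xmit (which is what this series does); the wrapper name is illustrative:

	/* Transmit a burst of XDP frames through the ndo and ring the
	 * doorbell once at the end of the burst.
	 */
	static int xdp_burst_xmit(struct net_device *dev,
				  struct xdp_frame **frames, int n)
	{
		return dev->netdev_ops->ndo_xdp_xmit(dev, n, frames,
						     XDP_XMIT_FLUSH);
	}

The return value is the number of frames actually queued (or a negative errno on early failure); frames that failed DMA mapping or found the SQ full count as drops and have already been returned via xdp_return_frame_rx_napi().
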
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
new file mode 100644
index 000000000000..6dfab045925f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_XDP_H__
+#define __MLX5_EN_XDP_H__
+
+#include "en.h"
+
+#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
+ MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_TX_DS_COUNT \
+ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+#endif
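MLX5E_XDP_MAX_MTU caps the linear-RQ frame so that headroom, packet, and the skb_shared_info tail all fit in one page. A back-of-envelope expansion for 4 KiB pages on 64-bit, assuming MLX5_SKB_FRAG_SZ(len) is SKB_DATA_ALIGN(len) plus the cache-line-aligned skb_shared_info (about 320 bytes in this era):

	/*   PAGE_SIZE                      4096
	 * - XDP_PACKET_HEADROOM           - 256
	 * - SKB_DATA_ALIGN(shared info)   - 320
	 *   -----------------------------------
	 *   MLX5E_XDP_MAX_MTU            ~3520 bytes
	 */

so MTUs much above roughly 3.5 KB cannot be accepted while an XDP program is attached.
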
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index f20074dbef32..1dd225380a66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -34,19 +34,26 @@
#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__
-#ifdef CONFIG_MLX5_ACCEL
-
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
-static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+static inline void
+mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+{
+ int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ udp_hdr(skb)->len = htons(payload_len);
+}
+
+static inline struct sk_buff *
+mlx5e_accel_handle_tx(struct sk_buff *skb,
+ struct mlx5e_txqsq *sq,
+ struct net_device *dev,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
@@ -64,9 +71,10 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
}
#endif
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
return skb;
}
-#endif /* CONFIG_MLX5_ACCEL */
-
#endif /* __MLX5E_EN_ACCEL_H__ */
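mlx5e_udp_gso_handle_tx_skb() exists because hardware UDP segmentation (SKB_GSO_UDP_L4) replicates the template UDP header into every segment, so the length field must describe one segment rather than the whole aggregated skb. A worked instance of that arithmetic:

	/* gso_size = 1400: each emitted datagram carries
	 *   udp->len = htons(1400 + sizeof(struct udphdr)) = htons(1408)
	 * regardless of how large the aggregated skb was.
	 */
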
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index c245d8e78509..128a82b1dbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
+#include "accel/accel.h"
#include "en.h"
enum {
@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
}
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *cqe_bcnt)
{
struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
struct xfrm_state *xs;
- __be16 *ethtype;
- /* Detect inline metadata */
- if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
- return skb;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ if (!is_metadata_hdr_valid(skb))
return skb;
/* Use the metadata */
@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return NULL;
}
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
return skb;
}
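The detection/removal logic deleted above moves into the shared "accel/accel.h" so the TLS RX path (below) can reuse it. A plausible shape for the shared helpers, reconstructed from the very lines this hunk removes rather than copied from the tree:

	static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
	{
		__be16 *ethtype;

		if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
			return false;
		ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
		return *ethtype == cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
	}

	static inline void remove_metadata_hdr(struct sk_buff *skb)
	{
		struct ethhdr *old_eth = (struct ethhdr *)skb->data;
		struct ethhdr *new_eth =
			(struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);

		/* Move the MAC addresses up; the ethertype that follows
		 * the metadata is already in its final place.
		 */
		memmove(new_eth, old_eth, 2 * ETH_ALEN);
		skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
	}

The new *cqe_bcnt adjustment matters because the RX path sizes the skb from the CQE byte count after the metadata has been stripped.
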
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2bfbbef1b054..ca47c0540904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -41,7 +41,7 @@
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index d167845271c3..eddd7702680b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
u32 caps = mlx5_accel_tls_device_caps(mdev);
int ret = -ENOMEM;
void *flow;
-
- if (direction != TLS_OFFLOAD_CTX_DIR_TX)
- return -EINVAL;
+ u32 swid;
flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
if (ret)
goto free_flow;
+ ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, &swid,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+ if (ret < 0)
+ goto free_flow;
+
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context *tx_ctx =
+ struct mlx5e_tls_offload_context_tx *tx_ctx =
mlx5e_get_tls_tx_context(tls_ctx);
- u32 swid;
-
- ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid);
- if (ret < 0)
- goto free_flow;
tx_ctx->swid = htonl(swid);
tx_ctx->expected_seq = start_offload_tcp_sn;
+ } else {
+ struct mlx5e_tls_offload_context_rx *rx_ctx =
+ mlx5e_get_tls_rx_context(tls_ctx);
+
+ rx_ctx->handle = htonl(swid);
}
return 0;
@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev,
enum tls_offload_ctx_dir direction)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ unsigned int handle;
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+ handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+ mlx5e_get_tls_tx_context(tls_ctx)->swid :
+ mlx5e_get_tls_rx_context(tls_ctx)->handle);
- mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
- } else {
- netdev_err(netdev, "unsupported direction %d\n", direction);
- }
+ mlx5_accel_tls_del_flow(priv->mdev, handle,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+ u32 seq, u64 rcd_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+ rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+ netdev_info(netdev, "resyncing seq %u rcd %llu\n", seq,
+ be64_to_cpu(rcd_sn));
+ mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
}
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
+ .tls_dev_resync_rx = mlx5e_tls_resync_rx,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
+ u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ if (caps & MLX5_ACCEL_TLS_TX) {
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ }
+
+ if (caps & MLX5_ACCEL_TLS_RX) {
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ }
+
+ if (!(caps & MLX5_ACCEL_TLS_LRO)) {
+ netdev->features &= ~NETIF_F_LRO;
+ netdev->hw_features &= ~NETIF_F_LRO;
+ }
+
netdev->tlsdev_ops = &mlx5e_tls_ops;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index b6162178f621..3f5d72163b56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
+ atomic64_t rx_tls_drop_resync_request;
+ atomic64_t rx_tls_resync_request;
+ atomic64_t rx_tls_resync_reply;
+ atomic64_t rx_tls_auth_fail;
};
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
};
-struct mlx5e_tls_offload_context {
- struct tls_offload_context base;
+struct mlx5e_tls_offload_context_tx {
+ struct tls_offload_context_tx base;
u32 expected_seq;
__be32 swid;
};
-static inline struct mlx5e_tls_offload_context *
+static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
- TLS_OFFLOAD_CONTEXT_SIZE);
- return container_of(tls_offload_ctx(tls_ctx),
- struct mlx5e_tls_offload_context,
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct mlx5e_tls_offload_context_tx,
+ base);
+}
+
+struct mlx5e_tls_offload_context_rx {
+ struct tls_offload_context_rx base;
+ __be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ return container_of(tls_offload_ctx_rx(tls_ctx),
+ struct mlx5e_tls_offload_context_rx,
base);
}
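Both accessors lean on the same pattern: the driver-private context embeds the core offload context as its first member inside the fixed-size region the TLS core allocates, and the BUILD_BUG_ON proves it fits. The pattern in miniature, as a kernel-context sketch with illustrative names (only container_of() is real):

	struct core_ctx { int core_state; };	/* what the core hands out */

	struct drv_ctx {			/* driver overlay */
		struct core_ctx base;		/* must be first */
		u32 handle;
	};

	static struct drv_ctx *to_drv_ctx(struct core_ctx *core)
	{
		return container_of(core, struct drv_ctx, base);
	}

Because base is the first member, the conversion is effectively free, and the compile-time size check guarantees the overlay never writes past the core allocation.
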
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 15aef71d1957..be137d4a9169 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -33,6 +33,14 @@
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
+#include "accel/accel.h"
+
+#include <net/inet6_hashtables.h>
+#include <linux/ipv6.h>
+
+#define SYNDROM_DECRYPTED 0x30
+#define SYNDROM_RESYNC_REQUEST 0x31
+#define SYNDROM_AUTH_FAILED 0x32
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33
@@ -44,10 +52,26 @@ struct sync_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct mlx5e_tls_metadata {
+struct recv_metadata_content {
+ u8 syndrome;
+ u8 reserved;
+ __be32 sync_seq;
+} __packed;
+
+struct send_metadata_content {
/* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid;
__be16 first_seq;
+} __packed;
+
+struct mlx5e_tls_metadata {
+ union {
+ /* from fpga to host */
+ struct recv_metadata_content recv;
+ /* from host to fpga */
+ struct send_metadata_content send;
+ unsigned char raw[6];
+ } __packed content;
/* packet type ID field */
__be16 ethertype;
} __packed;
@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+ pet->content.send.syndrome_swid =
+ htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0;
}
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
u32 tcp_seq, struct sync_info *info)
{
int remaining, i = 0, ret = -EINVAL;
@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome));
- pet->first_seq = htons(tcp_seq);
+ pet->content.send.first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header
@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
}
static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi,
@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
u16 *pi)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context *context;
+ struct mlx5e_tls_offload_context_tx *context;
struct tls_context *tls_ctx;
u32 expected_seq;
int datalen;
@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
out:
return skb;
}
+
+static int tls_update_resync_sn(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_tls_metadata *mdata)
+{
+ struct sock *sk = NULL;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ __be32 seq;
+
+ if (mdata->ethertype != htons(ETH_P_IP))
+ return -EINVAL;
+
+ iph = (struct iphdr *)(mdata + 1);
+
+ th = ((void *)iph) + iph->ihl * 4;
+
+ if (iph->version == 4) {
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+#endif
+ }
+ if (!sk || sk->sk_state == TCP_TIME_WAIT) {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
+ goto out;
+ }
+
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+
+ memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
+ tls_offload_rx_resync_request(sk, seq);
+out:
+ return 0;
+}
+
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt)
+{
+ struct mlx5e_tls_metadata *mdata;
+ struct mlx5e_priv *priv;
+
+ if (!is_metadata_hdr_valid(skb))
+ return;
+
+ /* Use the metadata */
+ mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
+ switch (mdata->content.recv.syndrome) {
+ case SYNDROM_DECRYPTED:
+ skb->decrypted = 1;
+ break;
+ case SYNDROM_RESYNC_REQUEST:
+ tls_update_resync_sn(netdev, skb, mdata);
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
+ break;
+ case SYNDROM_AUTH_FAILED:
+ /* Authentication failure will be observed and verified by kTLS */
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
+ break;
+ default:
+ /* Unknown syndrome: leave the metadata header in place */
+ return;
+ }
+
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
+}
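The structs at the top pin down the on-wire metadata: 6 content bytes plus the 2-byte metadata ethertype, spliced in right after the MAC addresses. Sketching the layout implied by those definitions (sizes in bytes; MLX5E_METADATA_ETHER_LEN == 8 under this reading):

	/* | dmac 6 | smac 6 | META ethertype 2 | content 6 | orig ethertype 2 | L3 ...
	 *
	 * RX content (from FPGA): { syndrome 1, reserved 1, sync_seq 4 }
	 * TX content (to FPGA):   { syndrome_swid 4, first_seq 2 }
	 */

which is why mlx5e_tls_handle_rx_skb() can switch on content.recv.syndrome directly, and why remove_metadata_hdr() and the *cqe_bcnt adjustment shave exactly MLX5E_METADATA_ETHER_LEN bytes.
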
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 405dfd302225..311667ec71b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe,
u16 *pi);
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt);
+
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index d258bb679271..45cdde694d20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -30,8 +30,6 @@
* SOFTWARE.
*/
-#ifdef CONFIG_RFS_ACCEL
-
#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
@@ -738,4 +736,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
-#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index fffe514ba855..98dd3e0ada72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en/port.h"
+#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -969,33 +970,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
return 0;
}
-static int mlx5e_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = priv->channels.params.num_channels;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
- break;
- case ETHTOOL_GRXCLSRULE:
- err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
- break;
- case ETHTOOL_GRXCLSRLALL:
- err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -1133,10 +1107,10 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
if (ret)
return ret;
- info->phc_index = mdev->clock.ptp ?
- ptp_clock_index(mdev->clock.ptp) : -1;
+ info->phc_index = mlx5_clock_get_ptp_index(mdev);
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
+ info->phc_index == -1)
return 0;
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1606,26 +1580,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int err = 0;
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXCLSRLINS:
- err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash)
{
@@ -1678,8 +1632,10 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+#ifdef CONFIG_MLX5_EN_RXNFC
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+#endif
.flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
@@ -1696,5 +1652,4 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
-
};
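mlx5_clock_get_ptp_index() folds the old mdev->clock.ptp dereference into lib/clock, and the extra phc_index == -1 bail-out stops the driver from advertising hardware timestamping without a PTP clock. A hedged user-space sketch of reading what this handler now reports, using only the standard ETHTOOL_GET_TS_INFO ioctl:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>

	/* Returns the PHC index for ifname, or -1 if none/unavailable */
	static int get_phc_index(const char *ifname)
	{
		struct ethtool_ts_info tsi = { .cmd = ETHTOOL_GET_TS_INFO };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&tsi;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			tsi.phc_index = -1;
		close(fd);
		return tsi.phc_index;
	}
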
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index eafc59280ada..75bb981e00b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
@@ -115,29 +118,203 @@ static void mask_spec(u8 *mask, u8 *val, size_t size)
*((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
}
-static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
- __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+#define MLX5E_FTE_SET(header_p, fld, v) \
+ MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
+
+#define MLX5E_FTE_ADDR_OF(header_p, fld) \
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
+
+static void
+set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
+ __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
if (ip4src_m) {
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
- memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4src_m));
}
if (ip4dst_m) {
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
- memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4dst_m));
}
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- ethertype, ETH_P_IP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- ethertype, 0xffff);
+
+ MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+ MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
+}
+
+static void
+set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
+ __be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
+{
+ u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+
+ if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ ip6src_v, ip6_sz);
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ ip6src_m, ip6_sz);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ ip6dst_v, ip6_sz);
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ ip6dst_m, ip6_sz);
+ }
+
+ MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+ MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
+}
+
+static void
+set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+ __be16 pdst_m, __be16 pdst_v)
+{
+ if (psrc_m) {
+ MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
+ }
+ if (pdst_m) {
+ MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
+ }
+
+ MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+ MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
+}
+
+static void
+set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+ __be16 pdst_m, __be16 pdst_v)
+{
+ if (psrc_m) {
+ MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
+ }
+
+ if (pdst_m) {
+ MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
+ }
+
+ MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+ MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
+}
+
+static void
+parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+ l4_mask->ip4dst, l4_val->ip4dst);
+
+ set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+ l4_mask->ip4dst, l4_val->ip4dst);
+
+ set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
+ l3_mask->ip4dst, l3_val->ip4dst);
+
+ if (l3_mask->proto) {
+ MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
+ MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
+ }
+}
+
+static void
+parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l3_mask->ip6src,
+ l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
+
+ if (l3_mask->l4_proto) {
+ MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
+ MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
+ }
+}
+
+static void
+parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l4_mask->ip6src,
+ l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+ set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
+ struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l4_mask->ip6src,
+ l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+ set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+ struct ethhdr *eth_val = &fs->h_u.ether_spec;
+
+ mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
+ MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
+ MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
+}
+
+static void
+set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
+{
+ MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
+ MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
+ MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
+ MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
+}
+
+static void
+set_dmac(void *headers_c, void *headers_v,
+ unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
+{
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}
static int set_flow_attrs(u32 *match_c, u32 *match_v,
@@ -148,112 +325,42 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethtool_tcpip4_spec *l4_val;
- struct ethtool_usrip4_spec *l3_mask;
- struct ethtool_usrip4_spec *l3_val;
- struct ethhdr *eth_val;
- struct ethhdr *eth_mask;
switch (flow_type) {
case TCP_V4_FLOW:
- l4_mask = &fs->m_u.tcp_ip4_spec;
- l4_val = &fs->h_u.tcp_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
- l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
- if (l4_mask->psrc) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
- ntohs(l4_val->psrc));
- }
- if (l4_mask->pdst) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
- ntohs(l4_val->pdst));
- }
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
- IPPROTO_TCP);
+ parse_tcp4(outer_headers_c, outer_headers_v, fs);
break;
case UDP_V4_FLOW:
- l4_mask = &fs->m_u.tcp_ip4_spec;
- l4_val = &fs->h_u.tcp_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
- l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
- if (l4_mask->psrc) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
- ntohs(l4_val->psrc));
- }
- if (l4_mask->pdst) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
- ntohs(l4_val->pdst));
- }
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
- IPPROTO_UDP);
+ parse_udp4(outer_headers_c, outer_headers_v, fs);
break;
case IP_USER_FLOW:
- l3_mask = &fs->m_u.usr_ip4_spec;
- l3_val = &fs->h_u.usr_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
- l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+ parse_ip4(outer_headers_c, outer_headers_v, fs);
+ break;
+ case TCP_V6_FLOW:
+ parse_tcp6(outer_headers_c, outer_headers_v, fs);
+ break;
+ case UDP_V6_FLOW:
+ parse_udp6(outer_headers_c, outer_headers_v, fs);
+ break;
+ case IPV6_USER_FLOW:
+ parse_ip6(outer_headers_c, outer_headers_v, fs);
break;
case ETHER_FLOW:
- eth_mask = &fs->m_u.ether_spec;
- eth_val = &fs->h_u.ether_spec;
-
- mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, smac_47_16),
- eth_mask->h_source);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, smac_47_16),
- eth_val->h_source);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, dmac_47_16),
- eth_mask->h_dest);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, dmac_47_16),
- eth_val->h_dest);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
- ntohs(eth_mask->h_proto));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
- ntohs(eth_val->h_proto));
+ parse_ether(outer_headers_c, outer_headers_v, fs);
break;
default:
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT) &&
- (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- first_vid, 0xfff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- first_vid, ntohs(fs->h_ext.vlan_tci));
- }
+ (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
+ set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
+
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, dmac_47_16),
- fs->m_ext.h_dest);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, dmac_47_16),
- fs->h_ext.h_dest);
+ set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
+ fs->h_ext.h_dest);
}
return 0;
@@ -379,16 +486,143 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
#define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1)
+static int validate_ethter(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+ int ntuples = 0;
+
+ if (!is_zero_ether_addr(eth_mask->h_dest))
+ ntuples++;
+ if (!is_zero_ether_addr(eth_mask->h_source))
+ ntuples++;
+ if (eth_mask->h_proto)
+ ntuples++;
+ return ntuples;
+}
+
+static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+ int ntuples = 0;
+
+ if (l4_mask->tos)
+ return -EINVAL;
+
+ if (l4_mask->ip4src) {
+ if (!all_ones(l4_mask->ip4src))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->ip4dst) {
+ if (!all_ones(l4_mask->ip4dst))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ ntuples++;
+ }
+ /* Flow is TCP/UDP */
+ return ++ntuples;
+}
+
+static int validate_ip4(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ int ntuples = 0;
+
+ if (l3_mask->l4_4_bytes || l3_mask->tos ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ if (l3_mask->ip4src) {
+ if (!all_ones(l3_mask->ip4src))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l3_mask->ip4dst) {
+ if (!all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l3_mask->proto)
+ ntuples++;
+ /* Flow is IPv4 */
+ return ++ntuples;
+}
+
+static int validate_ip6(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ int ntuples = 0;
+
+ if (l3_mask->l4_4_bytes || l3_mask->tclass)
+ return -EINVAL;
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
+ ntuples++;
+
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
+ ntuples++;
+ if (l3_mask->l4_proto)
+ ntuples++;
+ /* Flow is IPv6 */
+ return ++ntuples;
+}
+
+static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+ int ntuples = 0;
+
+ if (l4_mask->tclass)
+ return -EINVAL;
+
+ if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
+ ntuples++;
+
+ if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
+ ntuples++;
+
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ ntuples++;
+ }
+ /* Flow is TCP/UDP */
+ return ++ntuples;
+}
+
+static int validate_vlan(struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->m_ext.vlan_etype ||
+ fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ return -EINVAL;
+
+ if (fs->m_ext.vlan_tci &&
+ (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
+ return -EINVAL;
+
+ return 1;
+}
+
static int validate_flow(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethtool_usrip4_spec *l3_mask;
- struct ethhdr *eth_mask;
int num_tuples = 0;
+ int ret = 0;
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
- return -EINVAL;
+ return -ENOSPC;
if (fs->ring_cookie >= priv->channels.params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC)
@@ -396,73 +630,42 @@ static int validate_flow(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
- eth_mask = &fs->m_u.ether_spec;
- if (!is_zero_ether_addr(eth_mask->h_dest))
- num_tuples++;
- if (!is_zero_ether_addr(eth_mask->h_source))
- num_tuples++;
- if (eth_mask->h_proto)
- num_tuples++;
+ num_tuples += validate_ethter(fs);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- if (fs->m_u.tcp_ip4_spec.tos)
- return -EINVAL;
- l4_mask = &fs->m_u.tcp_ip4_spec;
- if (l4_mask->ip4src) {
- if (!all_ones(l4_mask->ip4src))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->ip4dst) {
- if (!all_ones(l4_mask->ip4dst))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->psrc) {
- if (!all_ones(l4_mask->psrc))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->pdst) {
- if (!all_ones(l4_mask->pdst))
- return -EINVAL;
- num_tuples++;
- }
- /* Flow is TCP/UDP */
- num_tuples++;
+ ret = validate_tcpudp4(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
break;
case IP_USER_FLOW:
- l3_mask = &fs->m_u.usr_ip4_spec;
- if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
- fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
- return -EINVAL;
- if (l3_mask->ip4src) {
- if (!all_ones(l3_mask->ip4src))
- return -EINVAL;
- num_tuples++;
- }
- if (l3_mask->ip4dst) {
- if (!all_ones(l3_mask->ip4dst))
- return -EINVAL;
- num_tuples++;
- }
- /* Flow is IPv4 */
- num_tuples++;
+ ret = validate_ip4(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = validate_tcpudp6(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
+ break;
+ case IPV6_USER_FLOW:
+ ret = validate_ip6(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
break;
default:
- return -EINVAL;
+ return -ENOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
- if (fs->m_ext.vlan_etype ||
- (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
- return -EINVAL;
-
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
- }
- num_tuples++;
+ ret = validate_vlan(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
}
if (fs->flow_type & FLOW_MAC_EXT &&
@@ -472,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv,
return num_tuples;
}
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs)
+static int
+mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
@@ -483,8 +687,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
num_tuples = validate_flow(priv, fs);
if (num_tuples <= 0) {
- netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
- return -EINVAL;
+ netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+ __func__, num_tuples);
+ return num_tuples;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
@@ -519,8 +724,8 @@ del_ethtool_rule:
return err;
}
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
- int location)
+static int
+mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
int err = 0;
@@ -539,8 +744,9 @@ out:
return err;
}
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- int location)
+static int
+mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
@@ -557,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
return -ENOENT;
}
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- u32 *rule_locs)
+static int
+mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
int location = 0;
int idx = 0;
@@ -587,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
+
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int mlx5e_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->channels.params.num_channels;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
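get_flow_table() converts specificity into priority: the validate_*() helpers return the tuple count (matched fields plus one for the protocol itself), and the table is chosen at prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples). A worked instance of that arithmetic under the constants above:

	/* TCP4 rule matching only the destination port:
	 *   num_tuples = 1 (pdst) + 1 (flow is TCP)        = 2
	 *   prio       = MLX5E_ETHTOOL_L3_L4_PRIO + (7 - 2)
	 *
	 * A fully specified 5-tuple rule gets num_tuples = 5 and a
	 * numerically lower prio, so it is evaluated first: the most
	 * specific matching rule wins.
	 */
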
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c592678ab5f1..5a7939e70190 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,8 +45,10 @@
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
+#include "lib/clock.h"
#include "en/port.h"
+#include "en/xdp.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -96,14 +98,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
- if (!params->xdp_prog) {
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ u32 frag_sz;
- return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
- }
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+
+ if (params->xdp_prog && frag_sz < PAGE_SIZE)
+ frag_sz = PAGE_SIZE;
- return PAGE_SIZE;
+ return frag_sz;
}
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
@@ -222,7 +229,7 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
u8 port_state;
port_state = mlx5_query_vport_state(mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
if (port_state == VPORT_STATE_UP) {
@@ -270,12 +277,9 @@ void mlx5e_update_stats_work(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
+
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->profile->update_stats(priv);
- queue_delayed_work(priv->wq, dwork,
- msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
- }
+ priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
}
@@ -352,8 +356,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*rq->mpwqe.info)),
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -487,7 +492,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -670,7 +674,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err_free:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -702,7 +706,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -879,7 +883,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (rq->mpwqe.umr_in_progress)
- mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+ rq->dealloc_wqe(rq, wq->head);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@@ -965,16 +969,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kfree(sq->db.di);
+ kvfree(sq->db.xdpi);
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
- GFP_KERNEL, numa);
- if (!sq->db.di) {
+ sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
+ GFP_KERNEL, numa);
+ if (!sq->db.xdpi) {
mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
}
@@ -985,7 +989,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -997,6 +1002,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->stats = is_redirect ?
+ &c->priv->channel_stats[c->ix].xdpsq :
+ &c->priv->channel_stats[c->ix].rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1024,15 +1033,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kfree(sq->db.ico_wqe);
+ kvfree(sq->db.ico_wqe);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
- GFP_KERNEL, numa);
+ sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.ico_wqe)),
+ GFP_KERNEL, numa);
if (!sq->db.ico_wqe)
return -ENOMEM;
@@ -1077,8 +1087,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
- kfree(sq->db.wqe_info);
- kfree(sq->db.dma_fifo);
+ kvfree(sq->db.wqe_info);
+ kvfree(sq->db.dma_fifo);
}
static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
@@ -1086,10 +1096,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
- GFP_KERNEL, numa);
- sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
- GFP_KERNEL, numa);
+ sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.dma_fifo)),
+ GFP_KERNEL, numa);
+ sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.wqe_info)),
+ GFP_KERNEL, numa);
if (!sq->db.dma_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
@@ -1523,7 +1535,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
struct mlx5e_create_sq_param csp = {};
@@ -1531,7 +1544,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
int err;
int i;
- err = mlx5e_alloc_xdpsq(c, params, param, sq);
+ err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
if (err)
return err;
@@ -1540,6 +1553,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ if (is_redirect)
+ set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
@@ -1893,7 +1908,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1922,10 +1937,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ if (err)
+ goto err_close_xdp_tx_cqs;
+
/* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
&cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
@@ -1942,7 +1961,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
- err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
if (err)
goto err_close_sqs;
@@ -1950,9 +1969,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_xdp_sq;
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
+ if (err)
+ goto err_close_rq;
+
*cp = c;
return 0;
+
+err_close_rq:
+ mlx5e_close_rq(&c->rq);
+
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -1971,6 +1998,9 @@ err_disable_napi:
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
+err_close_xdp_tx_cqs:
+ mlx5e_close_cq(&c->xdpsq.cq);
+
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -1979,7 +2009,7 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
return err;
}
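
The new XDP-redirect SQ slots into mlx5e_open_channel with its own error labels (err_close_xdp_tx_cqs, err_close_rq), keeping the usual goto-unwind discipline: teardown runs in exact reverse order of setup, and each label releases only what was acquired before the failing step. A generic, runnable sketch of that pattern; the acquire/release helpers and step numbers are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct res { int id; };

static struct res *acquire(int id, int fail)
{
	struct res *r;

	if (fail)
		return NULL;
	r = malloc(sizeof(*r));
	if (r)
		r->id = id;
	return r;
}

static void release(struct res *r)
{
	printf("releasing %d\n", r->id);
	free(r);
}

static int open_channel(int fail_at)
{
	struct res *cq, *sq, *xdpsq;
	int err = -1;

	cq = acquire(1, fail_at == 1);
	if (!cq)
		return err;
	sq = acquire(2, fail_at == 2);
	if (!sq)
		goto err_close_cq;
	xdpsq = acquire(3, fail_at == 3);
	if (!xdpsq)
		goto err_close_sq;

	/* success: normal close path, no unwind */
	release(xdpsq);
	release(sq);
	release(cq);
	return 0;

err_close_sq:
	release(sq);
err_close_cq:
	release(cq);
	return err;
}

int main(void)
{
	return open_channel(3) ? 1 : 0;	/* fail at step 3: sq, then cq unwound */
}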
@@ -2005,6 +2035,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
+ mlx5e_close_xdpsq(&c->xdpsq);
mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -2014,11 +2045,12 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq.xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_cq(&c->xdpsq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
}
#define DEFAULT_FRAG_SIZE (2048)
@@ -2276,7 +2308,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+ cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
if (!chs->c || !cparam)
goto err_free;
@@ -2287,7 +2319,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- kfree(cparam);
+ kvfree(cparam);
return 0;
err_close_channels:
@@ -2296,7 +2328,7 @@ err_close_channels:
err_free:
kfree(chs->c);
- kfree(cparam);
+ kvfree(cparam);
chs->num = 0;
return err;
}
@@ -2943,7 +2975,7 @@ int mlx5e_open(struct net_device *netdev)
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
- if (mlx5e_vxlan_allowed(priv->mdev))
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
udp_tunnel_get_rx_info(netdev);
return err;
@@ -3371,7 +3403,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
priv);
@@ -3405,6 +3437,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
@@ -3590,7 +3625,7 @@ unlock:
return err;
}
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3645,7 +3680,7 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
@@ -3703,6 +3738,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
+ if (params->xdp_prog &&
+ !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP is enabled\n",
+ new_mtu, MLX5E_XDP_MAX_MTU);
+ err = -EINVAL;
+ goto out;
+ }
+
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
@@ -3740,7 +3783,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
+ (mlx5_clock_get_ptp_index(priv->mdev) == -1))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -3874,9 +3918,9 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
- case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
+ case MLX5_VPORT_ADMIN_STATE_DOWN:
return IFLA_VF_LINK_STATE_DISABLE;
- case MLX5_ESW_VPORT_ADMIN_STATE_UP:
+ case MLX5_VPORT_ADMIN_STATE_UP:
return IFLA_VF_LINK_STATE_ENABLE;
}
return IFLA_VF_LINK_STATE_AUTO;
@@ -3886,11 +3930,11 @@ static int mlx5_ifla_link2vport(u8 ifla_link)
{
switch (ifla_link) {
case IFLA_VF_LINK_STATE_DISABLE:
- return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
+ return MLX5_VPORT_ADMIN_STATE_DOWN;
case IFLA_VF_LINK_STATE_ENABLE:
- return MLX5_ESW_VPORT_ADMIN_STATE_UP;
+ return MLX5_VPORT_ADMIN_STATE_UP;
}
- return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+ return MLX5_VPORT_ADMIN_STATE_AUTO;
}
static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
@@ -3928,6 +3972,57 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
}
#endif
+struct mlx5e_vxlan_work {
+ struct work_struct work;
+ struct mlx5e_priv *priv;
+ u16 port;
+};
+
+static void mlx5e_vxlan_add_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_add_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_del_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_del_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
+{
+ struct mlx5e_vxlan_work *vxlan_work;
+
+ vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+ if (!vxlan_work)
+ return;
+
+ if (add)
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
+ else
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
+
+ vxlan_work->priv = priv;
+ vxlan_work->port = port;
+ queue_work(priv->wq, &vxlan_work->work);
+}
+
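
The udp_tunnel add/del callbacks may run in contexts that cannot take priv->state_lock, so the port update is packaged into a GFP_ATOMIC-allocated work item and executed in process context; the sa_family argument is dropped because only the UDP port keys the hardware table. A user-space model of the defer-to-worker pattern, with a detached pthread standing in for priv->wq:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vxlan_work {
	uint16_t port;
	int add;			/* 1 = add, 0 = delete */
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void *vxlan_worker(void *arg)
{
	struct vxlan_work *w = arg;

	pthread_mutex_lock(&state_lock);	/* may sleep: fine here */
	printf("%s port %u\n", w->add ? "add" : "del", w->port);
	pthread_mutex_unlock(&state_lock);
	free(w);
	return NULL;
}

/* callable from a context that must not block on state_lock */
static void vxlan_queue_work(uint16_t port, int add)
{
	struct vxlan_work *w = malloc(sizeof(*w));
	pthread_t t;

	if (!w)
		return;			/* best effort, as in the driver */
	w->port = port;
	w->add = add;
	pthread_create(&t, NULL, vxlan_worker, w);
	pthread_detach(&t);
}

int main(void)
{
	vxlan_queue_work(4789, 1);
	pthread_exit(NULL);		/* let the worker finish */
}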
static void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
@@ -3936,10 +4031,10 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -3950,10 +4045,10 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
@@ -3984,7 +4079,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
port = be16_to_cpu(udph->dest);
/* Verify if UDP port is being offloaded by HW */
- if (mlx5e_vxlan_lookup_port(priv, port))
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
}
@@ -4091,26 +4186,47 @@ static void mlx5e_tx_timeout(struct net_device *dev)
queue_work(priv->wq, &priv->tx_timeout_work);
}
+static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_channels new_channels = {};
+
+ if (priv->channels.params.lro_en) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ return -EINVAL;
+ }
+
+ if (MLX5_IPSEC_DEV(priv->mdev)) {
+ netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ return -EINVAL;
+ }
+
+ new_channels.params = priv->channels.params;
+ new_channels.params.xdp_prog = prog;
+
+ if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
+ new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct bpf_prog *old_prog;
- int err = 0;
bool reset, was_opened;
+ int err = 0;
int i;
mutex_lock(&priv->state_lock);
- if ((netdev->features & NETIF_F_LRO) && prog) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
- err = -EINVAL;
- goto unlock;
- }
-
- if ((netdev->features & NETIF_F_HW_ESP) && prog) {
- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
- err = -EINVAL;
- goto unlock;
+ if (prog) {
+ err = mlx5e_xdp_allowed(priv, prog);
+ if (err)
+ goto unlock;
}
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -4193,7 +4309,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx5e_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -4235,11 +4350,12 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check,
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
-#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
+ .ndo_xdp_xmit = mlx5e_xdp_xmit,
+#ifdef CONFIG_MLX5_EN_ARFS
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -4535,8 +4651,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
- netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4544,7 +4659,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5e_vxlan_allowed(mdev)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4561,6 +4676,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
if (fcs_supported)
@@ -4585,7 +4705,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
netdev->hw_features |= NETIF_F_HW_TC;
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
@@ -4650,14 +4770,12 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
- mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4831,7 +4949,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
netdev->rx_cpu_rmap = mdev->rmap;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2b8040a3cdbd..c9cc9747d21d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -698,8 +698,8 @@ static int mlx5e_rep_open(struct net_device *dev)
goto unlock;
if (!mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -716,8 +716,8 @@ static int mlx5e_rep_close(struct net_device *dev)
mutex_lock(&priv->state_lock);
mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
return 0;
@@ -897,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d3a1dd20e41d..15d8ae28c040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
@@ -44,7 +43,9 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
+#include "en/xdp.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -238,8 +239,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}
@@ -276,10 +276,11 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
}
static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *frag)
+ struct mlx5e_wqe_frag_info *frag,
+ bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, true);
+ mlx5e_page_release(rq, frag->di, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -307,25 +308,26 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
free_frags:
while (--i >= 0)
- mlx5e_put_rx_frag(rq, --frag);
+ mlx5e_put_rx_frag(rq, --frag, true);
return err;
}
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi)
+ struct mlx5e_wqe_frag_info *wi,
+ bool recycle)
{
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
- mlx5e_put_rx_frag(rq, wi);
+ mlx5e_put_rx_frag(rq, wi, recycle);
}
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, false);
}
static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
@@ -395,7 +397,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
}
}
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+static void
+mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
const bool no_xdp_xmit =
bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
@@ -404,7 +407,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], true);
+ mlx5e_page_release(rq, &dma_info[i], recycle);
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
@@ -487,7 +490,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->pc += MLX5E_UMR_WQEBBS;
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
return 0;
@@ -504,8 +507,8 @@ err_unmap:
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-
- mlx5e_free_rx_mpwqe(rq, wi);
+ /* Don't recycle: this function is called on rq/netdev close */
+ mlx5e_free_rx_mpwqe(rq, wi, false);
}
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -601,6 +604,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (!rq->mpwqe.umr_in_progress)
mlx5e_alloc_rx_mpwqe(rq, wq->head);
+ else
+ rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
return false;
}
@@ -795,6 +800,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct net_device *netdev = rq->netdev;
skb->mac_len = ETH_HLEN;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
+#endif
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -839,135 +849,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
-
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
-}
-
-static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- const struct xdp_buff *xdp)
-{
- struct mlx5e_xdpsq *sq = &rq->xdpsq;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
-
- ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
- dma_addr_t dma_addr = di->addr + data_offset;
- unsigned int dma_len = xdp->data_end - xdp->data;
-
- struct mlx5e_rq_stats *stats = rq->stats;
-
- prefetchw(wqe);
-
- if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
- stats->xdp_drop++;
- return false;
- }
-
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- if (sq->db.doorbell) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- sq->db.doorbell = false;
- }
- stats->xdp_tx_full++;
- return false;
- }
-
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
-
- cseg->fm_ce_se = 0;
-
- dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
-
- /* copy the inline part if required */
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
- dma_len -= MLX5E_XDP_MIN_INLINE;
- dma_addr += MLX5E_XDP_MIN_INLINE;
- dseg++;
- }
-
- /* write the dma part */
- dseg->addr = cpu_to_be64(dma_addr);
- dseg->byte_count = cpu_to_be32(dma_len);
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-
- /* move page to reference to sq responsibility,
- * and mark so it's not put back in page-cache.
- */
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
- sq->db.di[pi] = *di;
- sq->pc++;
-
- sq->db.doorbell = true;
-
- stats->xdp_tx++;
- return true;
-}
-
-/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
-{
- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
- struct xdp_buff xdp;
- u32 act;
- int err;
-
- if (!prog)
- return false;
-
- xdp.data = va + *rx_headroom;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.data_hard_start = va;
- xdp.rxq = &rq->xdp_rxq;
-
- act = bpf_prog_run_xdp(prog, &xdp);
- switch (act) {
- case XDP_PASS:
- *rx_headroom = xdp.data - xdp.data_hard_start;
- *len = xdp.data_end - xdp.data;
- return false;
- case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
- trace_xdp_exception(rq->netdev, prog, act);
- return true;
- case XDP_REDIRECT:
- /* When XDP enabled then page-refcnt==1 here */
- err = xdp_do_redirect(rq->netdev, &xdp, prog);
- if (!err) {
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
- rq->xdpsq.db.redirect_flush = true;
- mlx5e_page_dma_unmap(rq, di);
- }
- return true;
- default:
- bpf_warn_invalid_xdp_action(act);
- case XDP_ABORTED:
- trace_xdp_exception(rq->netdev, prog, act);
- case XDP_DROP:
- rq->stats->xdp_drop++;
- return true;
- }
-}
-
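
The removed mlx5e_xmit_xdp_frame and mlx5e_xdp_handle bodies move to the new en/xdp.c (included earlier as "en/xdp.h"); the verdict dispatch itself is unchanged: PASS hands the buffer to the SKB path, TX and REDIRECT consume it, and unknown actions fall through to DROP. A standalone sketch of that dispatch, where the enum values and the xmit/redirect helpers are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum xdp_verdict { VERDICT_PASS, VERDICT_TX, VERDICT_REDIRECT, VERDICT_DROP };

static bool xmit_frame(void) { return true; }	/* stand-in for the TX path */
static bool do_redirect(void) { return true; }	/* stand-in for redirect */

/* returns true when the packet was consumed and must not reach the stack */
static bool xdp_handle(enum xdp_verdict act, unsigned long *drops)
{
	switch (act) {
	case VERDICT_PASS:
		return false;		/* build an skb as usual */
	case VERDICT_TX:
		if (!xmit_frame())
			(*drops)++;	/* ring full: count and drop */
		return true;
	case VERDICT_REDIRECT:
		if (!do_redirect())
			(*drops)++;
		return true;
	case VERDICT_DROP:
	default:
		(*drops)++;		/* unknown verdicts fall to drop */
		return true;
	}
}

int main(void)
{
	unsigned long drops = 0;

	printf("consumed=%d drops=%lu\n", xdp_handle(VERDICT_TX, &drops), drops);
	return 0;
}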
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
@@ -1105,7 +986,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1147,7 +1028,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1218,6 +1099,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
+ prefetchw(va); /* xdp_frame data area */
prefetch(data);
rcu_read_lock();
@@ -1261,7 +1143,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
if (unlikely(mpwrq_is_filler_cqe(cqe))) {
- rq->stats->mpwqe_filler++;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
goto mpwrq_cqe_out;
}
@@ -1281,7 +1166,7 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
@@ -1317,14 +1202,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
- if (xdpsq->db.doorbell) {
+ if (xdpsq->doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
- xdpsq->db.doorbell = false;
+ xdpsq->doorbell = false;
}
- if (xdpsq->db.redirect_flush) {
+ if (xdpsq->redirect_flush) {
xdp_do_flush_map();
- xdpsq->db.redirect_flush = false;
+ xdpsq->redirect_flush = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -1335,78 +1220,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
return work_done;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
-{
- struct mlx5e_xdpsq *sq;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
- u16 sqcc;
- int i;
-
- sq = container_of(cq, struct mlx5e_xdpsq, cq);
-
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return false;
-
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
- if (!cqe)
- return false;
-
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
-
- /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
- * otherwise a cq overrun may occur
- */
- sqcc = sq->cc;
-
- i = 0;
- do {
- u16 wqe_counter;
- bool last_wqe;
-
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
- do {
- struct mlx5e_dma_info *di;
- u16 ci;
-
- last_wqe = (sqcc == wqe_counter);
-
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- di = &sq->db.di[ci];
-
- sqcc++;
- /* Recycle RX page */
- mlx5e_page_release(rq, di, true);
- } while (!last_wqe);
- } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
-
- mlx5_cqwq_update_db_record(&cq->wq);
-
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
- sq->cc = sqcc;
- return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
-{
- struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
- struct mlx5e_dma_info *di;
- u16 ci;
-
- while (sq->cc != sq->pc) {
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
- di = &sq->db.di[ci];
- sq->cc++;
-
- mlx5e_page_release(rq, di, false);
- }
-}
-
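
mlx5e_poll_xdpsq_cq, also relocated, advances a local copy of the consumer counter while recycling pages and publishes sq->cc only after a wmb(), so the producer never observes a counter that runs ahead of the freed slots. A small C11 model of that discipline using a release store for the barrier; the ring layout and sizes are illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SZ 8			/* power of two, as in the driver rings */

struct ring {
	_Atomic unsigned int cc;	/* consumer counter, read by producer */
	unsigned int pc;		/* producer counter */
	int slot[RING_SZ];
};

static void poll_completions(struct ring *r, unsigned int completed_up_to)
{
	unsigned int sqcc = atomic_load_explicit(&r->cc, memory_order_relaxed);

	while (sqcc != completed_up_to) {
		unsigned int ci = sqcc & (RING_SZ - 1);

		r->slot[ci] = 0;	/* recycle the buffer for this slot */
		sqcc++;
	}
	/* publish: producer must see recycled slots before the new cc */
	atomic_store_explicit(&r->cc, sqcc, memory_order_release);
}

int main(void)
{
	struct ring r = { .pc = 3 };

	poll_completions(&r, r.pc);
	printf("cc=%u\n", atomic_load(&r.cc));
	return 0;
}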
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
@@ -1508,7 +1321,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
mlx5_wq_cyc_pop(wq);
}
@@ -1531,19 +1344,19 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (unlikely(!skb)) {
/* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+ skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 4d316cc9b008..35ded91203f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -74,7 +74,7 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
if (!netif_carrier_ok(priv->netdev))
return 1;
- port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+ port_state = mlx5_query_vport_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0);
return port_state == VPORT_STATE_UP ? 0 : 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1646859974ce..12fdf5c92b67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
@@ -58,8 +59,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
@@ -67,10 +71,17 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -80,6 +91,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};
@@ -118,6 +134,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
+ struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
+ struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
int j;
@@ -131,11 +149,15 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_redirect += rq_stats->xdp_redirect;
+ s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_full += xdpsq_stats->full;
+ s->rx_xdp_tx_err += xdpsq_stats->err;
+ s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+ s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -145,7 +167,17 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
- s->ch_eq_rearm += ch_stats->eq_rearm;
+ s->rx_congst_umr += rq_stats->congst_umr;
+ s->ch_events += ch_stats->events;
+ s->ch_poll += ch_stats->poll;
+ s->ch_arm += ch_stats->arm;
+ s->ch_aff_change += ch_stats->aff_change;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+ /* xdp redirect */
+ s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_full += xdpsq_red_stats->full;
+ s->tx_xdp_err += xdpsq_red_stats->err;
+ s->tx_xdp_cqes += xdpsq_red_stats->cqes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -157,8 +189,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
+ s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;
@@ -170,6 +204,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
+ s->tx_cqes += sq_stats->cqes;
}
}
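
mlx5e_grp_sw_update_stats now folds two extra per-channel counter blocks into the software totals: rq_xdpsq (the RQ-owned XDP_TX queue) and xdpsq (the XDP_REDIRECT target). A minimal model of that aggregation loop, with the structs trimmed to a couple of counters:

#include <stdint.h>
#include <stdio.h>

struct xdpsq_stats { uint64_t xmit, full, err, cqes; };
struct channel_stats {
	struct xdpsq_stats rq_xdpsq;	/* XDP_TX queue owned by the RQ */
	struct xdpsq_stats xdpsq;	/* XDP_REDIRECT target queue */
};
struct sw_stats { uint64_t rx_xdp_tx_xmit, tx_xdp_xmit; };

static void update_sw_stats(struct sw_stats *s,
			    const struct channel_stats *ch, int nch)
{
	int i;

	for (i = 0; i < nch; i++) {
		s->rx_xdp_tx_xmit += ch[i].rq_xdpsq.xmit;
		s->tx_xdp_xmit    += ch[i].xdpsq.xmit;
	}
}

int main(void)
{
	struct channel_stats ch[2] = {
		{ .rq_xdpsq = { .xmit = 5 }, .xdpsq = { .xmit = 1 } },
		{ .rq_xdpsq = { .xmit = 7 }, .xdpsq = { .xmit = 2 } },
	};
	struct sw_stats s = { 0 };

	update_sw_stats(&s, ch, 2);
	printf("rx_xdp_tx_xmit=%lu tx_xdp_xmit=%lu\n",
	       (unsigned long)s.rx_xdp_tx_xmit, (unsigned long)s.tx_xdp_xmit);
	return 0;
}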
@@ -1106,13 +1141,13 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
@@ -1122,6 +1157,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1140,16 +1176,37 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
+static const struct counter_desc rq_xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
+static const struct counter_desc xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
+#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
@@ -1158,7 +1215,9 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
- (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
+ (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
+ (NUM_RQ_XDPSQ_STATS * max_nch) +
+ (NUM_XDPSQ_STATS * max_nch);
}
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -1172,9 +1231,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ch_stats_desc[j].format, i);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_xdpsq_stats_desc[j].format, i);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1183,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sq_stats_desc[j].format,
priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ xdpsq_stats_desc[j].format, i);
+
return idx;
}
@@ -1198,11 +1267,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
ch_stats_desc, j);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq_stats_desc, j);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ rq_xdpsq_stats_desc, j);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1211,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq_stats_desc, j);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ xdpsq_stats_desc, j);
+
return idx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 643153bb3607..a4c035aedd46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,8 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
@@ -61,6 +63,7 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
+ u64 tx_nop;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_removed_vlan_packets;
@@ -69,8 +72,11 @@ struct mlx5e_sw_stats {
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
- u64 rx_xdp_tx;
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_full;
+ u64 rx_xdp_tx_err;
+ u64 rx_xdp_tx_cqe;
u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
@@ -78,10 +84,17 @@ struct mlx5e_sw_stats {
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 tx_recover;
+ u64 tx_cqes;
u64 tx_queue_wake;
+ u64 tx_udp_seg_rem;
u64 tx_cqe_err;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_full;
+ u64 tx_xdp_err;
+ u64 tx_xdp_cqes;
u64 rx_wqe_err;
- u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_filler_cqes;
+ u64 rx_mpwqe_filler_strides;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@@ -91,6 +104,11 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 rx_congst_umr;
+ u64 ch_events;
+ u64 ch_poll;
+ u64 ch_arm;
+ u64 ch_aff_change;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
@@ -168,10 +186,10 @@ struct mlx5e_rq_stats {
u64 lro_bytes;
u64 removed_vlan_packets;
u64 xdp_drop;
- u64 xdp_tx;
- u64 xdp_tx_full;
+ u64 xdp_redirect;
u64 wqe_err;
- u64 mpwqe_filler;
+ u64 mpwqe_filler_cqes;
+ u64 mpwqe_filler_strides;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
@@ -181,6 +199,7 @@ struct mlx5e_rq_stats {
u64 cache_empty;
u64 cache_busy;
u64 cache_waive;
+ u64 congst_umr;
};
struct mlx5e_sq_stats {
@@ -196,6 +215,7 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
@@ -206,11 +226,24 @@ struct mlx5e_sq_stats {
u64 dropped;
u64 recover;
/* dirtied @completion */
- u64 wake ____cacheline_aligned_in_smp;
+ u64 cqes ____cacheline_aligned_in_smp;
+ u64 wake;
u64 cqe_err;
};
+struct mlx5e_xdpsq_stats {
+ u64 xmit;
+ u64 full;
+ u64 err;
+ /* dirtied @completion */
+ u64 cqes ____cacheline_aligned_in_smp;
+};
+
struct mlx5e_ch_stats {
+ u64 events;
+ u64 poll;
+ u64 arm;
+ u64 aff_change;
u64 eq_rearm;
};
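
In both mlx5e_sq_stats and the new mlx5e_xdpsq_stats, the counters dirtied at completion time now start on their own ____cacheline_aligned_in_smp boundary, so the transmit path and the completion path do not false-share a cache line. An equivalent standalone C11 sketch using alignas; the 64-byte line size is an assumption:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64

struct sq_stats {
	/* dirtied @xmit */
	uint64_t packets;
	uint64_t bytes;
	/* dirtied @completion: forced onto the next cache line */
	alignas(CACHELINE) uint64_t cqes;
	uint64_t wake;
};

int main(void)
{
	printf("cqes offset: %zu\n", offsetof(struct sq_stats, cqes)); /* 64 */
	return 0;
}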
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dfbcda0d0e08..9131a1376e7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"
@@ -1032,10 +1032,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
* dst ip pair
*/
n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
- if (!n) {
- WARN(1, "The neighbour already freed\n");
+ if (!n)
return;
- }
neigh_event_send(n, NULL);
neigh_release(n);
@@ -1126,16 +1124,12 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
goto vxlan_match_offload_err;
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
@@ -1213,6 +1207,26 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ }
+
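
The new ENC_IP match splits the tunnel tos byte into its two architected fields: ECN in the low two bits, DSCP in the high six, exactly as the MLX5_SET calls above program them. A two-line demonstration of the split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tos = 0xb8;		/* DSCP 46 (EF), ECN 0 */
	unsigned int ecn = tos & 0x3;
	unsigned int dscp = tos >> 2;

	printf("dscp=%u ecn=%u\n", dscp, ecn);	/* dscp=46 ecn=0 */
	return 0;
}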
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
*/
@@ -1237,6 +1251,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -1247,6 +1265,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
@@ -1256,7 +1275,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -1327,9 +1347,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
- if (mask->vlan_id || mask->vlan_priority) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
+ if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ svlan_tag, 1);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ cvlan_tag, 1);
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@@ -1341,6 +1370,41 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CVLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CVLAN,
+ f->mask);
+ if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
+ if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_svlan_tag, 1);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ outer_second_svlan_tag, 1);
+ } else {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_cvlan_tag, 1);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ outer_second_cvlan_tag, 1);
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
+ mask->vlan_id);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
+ key->vlan_id);
+ MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
+ mask->vlan_priority);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
+ key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
+ }
+ }
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
@@ -2082,7 +2146,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi4 *fl4,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -2106,7 +2170,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
else
*out_dev = rt->dst.dev;
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
if (!n)
@@ -2135,7 +2200,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi6 *fl6,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct neighbour *n = NULL;
struct dst_entry *dst;
@@ -2150,7 +2215,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
if (ret < 0)
return ret;
- *out_ttl = ip6_dst_hoplimit(dst);
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
@@ -2174,7 +2240,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
__be32 daddr,
__be32 saddr,
__be16 udp_dst_port,
@@ -2194,6 +2260,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
ip->daddr = daddr;
ip->saddr = saddr;
+ ip->tos = tos;
ip->ttl = ttl;
ip->protocol = IPPROTO_UDP;
ip->version = 0x4;
@@ -2207,7 +2274,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
struct in6_addr *daddr,
struct in6_addr *saddr,
__be16 udp_dst_port,
@@ -2224,7 +2291,7 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev,
ether_addr_copy(eth->h_source, out_dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
- ip6_flow_hdr(ip6h, 0, 0);
+ ip6_flow_hdr(ip6h, tos, 0);
/* the HW fills up ipv6 payload len */
ip6h->nexthdr = IPPROTO_UDP;
ip6h->hop_limit = ttl;
@@ -2246,9 +2313,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int ttl, err;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2269,6 +2336,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = -EOPNOTSUPP;
goto free_encap;
}
+
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl4.flowi4_tos = tun_key->tos;
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
@@ -2303,7 +2374,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header,
- ipv4_encap_size, e->h_dest, ttl,
+ ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr,
fl4.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2351,9 +2422,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int err, ttl = 0;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2375,6 +2446,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
@@ -2409,7 +2483,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header,
- ipv6_encap_size, e->h_dest, ttl,
+ ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr,
&fl6.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2455,11 +2529,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
- REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
@@ -2479,7 +2549,7 @@ vxlan_encap_offload_err:
return -EOPNOTSUPP;
}
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
@@ -2535,6 +2605,56 @@ out_err:
return err;
}
+static int parse_tc_vlan_action(struct mlx5e_priv *priv,
+ const struct tc_action *a,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+{
+ u8 vlan_idx = attr->total_vlan;
+
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ return -EOPNOTSUPP;
+
+ if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
+ } else {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ }
+ } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
+ attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
+ attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
+ if (!attr->vlan_proto[vlan_idx])
+ attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
+
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ } else {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
+ (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a)))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ }
+ } else { /* action is TCA_VLAN_ACT_MODIFY */
+ return -EOPNOTSUPP;
+ }
+
+ attr->total_vlan = vlan_idx + 1;
+
+ return 0;
+}
+
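Editor's note: the new helper folds the previously inlined VLAN handling into one place and extends it to two tags. A minimal sketch of the flag mapping it implements, assuming MLX5_FS_VLAN_DEPTH == 2 (the helper above is the authoritative version; this function is illustrative only):

/* Sketch: which flow-context action flag a pop/push at tag depth
 * idx (0 = outer, 1 = inner) selects. The constants are the
 * kernel's; the function itself is hypothetical.
 */
static u32 vlan_action_flag(bool push, u8 idx)
{
	if (push)
		return idx ? MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 :
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
	return idx ? MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 :
		     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
}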
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -2546,6 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
+ int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@@ -2562,8 +2683,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_pedit(a)) {
- int err;
-
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
@@ -2630,23 +2749,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_vlan(a)) {
- if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
- action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- attr->vlan_vid = tcf_vlan_push_vid(a);
- if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
- attr->vlan_prio = tcf_vlan_push_prio(a);
- attr->vlan_proto = tcf_vlan_push_proto(a);
- if (!attr->vlan_proto)
- attr->vlan_proto = htons(ETH_P_8021Q);
- } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
- tcf_vlan_push_prio(a)) {
- return -EOPNOTSUPP;
- }
- } else { /* action is TCA_VLAN_ACT_MODIFY */
- return -EOPNOTSUPP;
- }
+ err = parse_tc_vlan_action(priv, a, attr, &action);
+
+ if (err)
+ return err;
+
attr->mirror_count = attr->out_count;
continue;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f29deb44bf3b..ae73ea992845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
+{
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+}
+
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
-
- sq->db.dma_fifo[i].addr = addr;
- sq->db.dma_fifo[i].size = size;
- sq->db.dma_fifo[i].type = map_type;
- sq->dma_fifo_pc++;
-}
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
-{
- return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+ dma->addr = addr;
+ dma->size = size;
+ dma->type = map_type;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
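Editor's note: the refactor moves mlx5e_dma_get() ahead of mlx5e_dma_push() so the push path can reuse it. The masking it relies on only works for power-of-two fifo sizes; a self-contained illustration (not driver code):

#include <stdint.h>

/* With a power-of-two ring size, "pc & (size - 1)" equals
 * "pc % size" but is cheaper, and it stays correct when the
 * producer counter pc wraps around.
 */
static inline uint32_t fifo_index(uint32_t pc, uint32_t size_pow2)
{
	return pc & (size_pow2 - 1);
}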
@@ -111,10 +110,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb);
+ int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
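Editor's note: this signature change tracks the core ndo_select_queue prototype of this kernel generation, which gained a subordinate-device argument; the fallback helper takes it too, and the driver passes NULL since it has no subordinate devices. Assumed prototype, hedged from the 4.18/4.19-era API:

/* Assumed net_device_ops member this change follows: */
u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
			struct net_device *sb_dev,
			select_queue_fallback_t fallback);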
@@ -228,7 +228,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+ else
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
stats->tso_packets++;
stats->tso_bytes += skb->len - ihs;
}
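Editor's note: the hunk extends inline-header sizing to UDP GSO (SKB_GSO_UDP_L4). Unlike TCP, whose header length varies with options, the UDP header is a fixed 8 bytes, so sizeof(struct udphdr) suffices. The same rule condensed into an illustrative helper:

static u16 gso_inline_hdr_bytes(const struct sk_buff *skb)
{
	/* UDP header is a fixed 8 bytes; TCP length varies with options */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return skb_transport_offset(skb) + sizeof(struct udphdr);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}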
@@ -443,12 +446,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
-#ifdef CONFIG_MLX5_ACCEL
/* might send skbs and update wqe and pi */
skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
if (unlikely(!skb))
return NETDEV_TX_OK;
-#endif
+
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
@@ -466,6 +468,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
+ struct mlx5e_sq_stats *stats;
struct mlx5e_txqsq *sq;
struct mlx5_cqe64 *cqe;
u32 dma_fifo_cc;
@@ -483,6 +486,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (!cqe)
return false;
+ stats = sq->stats;
+
npkts = 0;
nbytes = 0;
@@ -511,7 +516,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
queue_work(cq->channel->priv->wq,
&sq->recover.recover_work);
}
- sq->stats->cqe_err++;
+ stats->cqe_err++;
}
do {
@@ -556,6 +561,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ stats->cqes += i;
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
@@ -571,7 +578,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
MLX5E_SQ_STOP_ROOM) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
- sq->stats->wake++;
+ stats->wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 1b17f682693b..85d517360157 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,6 +32,7 @@
#include <linux/irq.h>
#include "en.h"
+#include "en/xdp.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -74,13 +75,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_ch_stats *ch_stats = c->stats;
bool busy = false;
int work_done = 0;
int i;
+ ch_stats->poll++;
+
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
@@ -94,6 +100,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
+ ch_stats->aff_change++;
if (budget && work_done == budget)
work_done--;
}
@@ -101,6 +108,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
+ ch_stats->arm++;
+
for (i = 0; i < c->num_tc; i++) {
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
@@ -110,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
+ mlx5e_cq_arm(&c->xdpsq.cq);
return work_done;
}
@@ -118,8 +128,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- cq->event_ctr++;
napi_schedule(cq->napi);
+ cq->event_ctr++;
+ cq->channel->stats->events++;
}
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 406c23862f5f..48864f4988a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -40,6 +40,8 @@
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
+#include "lib/clock.h"
+#include "diag/fw_tracer.h"
enum {
MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
@@ -168,6 +170,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
case MLX5_EVENT_TYPE_GENERAL_EVENT:
return "MLX5_EVENT_TYPE_GENERAL_EVENT";
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ return "MLX5_EVENT_TYPE_DEVICE_TRACER";
default:
return "Unrecognized event";
}
@@ -576,6 +580,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
case MLX5_EVENT_TYPE_GENERAL_EVENT:
general_event_handler(dev, eqe);
break;
+
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ mlx5_fw_tracer_event(dev, eqe);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -853,6 +862,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, temp_warn_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
+ if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 40dba9e8af92..2b252cde5cc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -246,7 +246,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
@@ -1469,7 +1469,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
return;
mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num,
vport->info.link_state);
mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
@@ -1582,9 +1582,9 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
esw_vport_disable_qos(esw, vport_num);
if (vport_num && esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
esw_vport_destroy_drop_counters(vport);
@@ -1618,7 +1618,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw->mode = mode;
if (mode == SRIOV_LEGACY) {
- err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+ err = esw_create_legacy_fdb_table(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -1736,7 +1736,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num;
- vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport, link_state);
if (err) {
mlx5_core_warn(esw->dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b174da2884c5..c17bfcab517c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -38,6 +38,7 @@
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -256,9 +257,10 @@ struct mlx5_esw_flow_attr {
int out_count;
int action;
- __be16 vlan_proto;
- u16 vlan_vid;
- u8 vlan_prio;
+ __be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
+ u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
+ u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
+ u8 total_vlan;
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
@@ -282,10 +284,17 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags);
-static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
+static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
+ u8 vlan_depth)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+ bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+
+ if (vlan_depth == 1)
+ return ret;
+
+ return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
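Editor's note: the new depth parameter lets callers gate single- and double-tag offload separately; depth 1 checks only the original pop_vlan/push_vlan capabilities, depth 2 additionally requires the *_2 capabilities. Hypothetical caller-side gate:

/* Illustrative check for a flow that pushes two VLAN tags: */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
	return -EOPNOTSUPP;	/* QinQ push/pop not offloadable */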
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 91f1209886ff..f72b5c9dcfe9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -66,13 +66,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
- flow_act.vlan.vid = attr->vlan_vid;
- flow_act.vlan.prio = attr->vlan_prio;
+ flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
+ flow_act.vlan[0].vid = attr->vlan_vid[0];
+ flow_act.vlan[0].prio = attr->vlan_prio[0];
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
+ flow_act.vlan[1].vid = attr->vlan_vid[1];
+ flow_act.vlan[1].prio = attr->vlan_prio[1];
+ }
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -266,7 +271,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
/* protects against (1) setting rules with different vlans to push and
* (2) setting rules without vlans (attr->vlan == 0) && with vlans to push (!= 0)
*/
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
goto out_notsupp;
return 0;
@@ -284,7 +289,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -324,11 +329,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan_vid;
+ vport->vlan = attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@@ -347,7 +352,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
if (!attr->vlan_handled)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index c9736238604a..5cf5f2a9d51f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
void *ptr)
{
+ unsigned long flags;
int ret;
/* TLS metadata format is 1 byte for syndrome followed
@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
idr_preload(GFP_KERNEL);
- spin_lock_irq(idr_spinlock);
+ spin_lock_irqsave(idr_spinlock, flags);
ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irq(idr_spinlock);
+ spin_unlock_irqrestore(idr_spinlock, flags);
idr_preload_end();
return ret;
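Editor's note: switching to the irqsave variant makes the allocation safe for callers that may already hold interrupts disabled, since plain spin_unlock_irq() would re-enable IRQs unconditionally and clobber the caller's state. The generic pattern, not driver-specific:

/* irqsave/irqrestore preserve whatever IRQ state the caller had: */
static void counter_inc_any_context(spinlock_t *lock, u64 *counter)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* save state, disable IRQs */
	(*counter)++;
	spin_unlock_irqrestore(lock, flags);	/* restore the saved state */
}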
@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr,
spin_unlock_irqrestore(idr_spinlock, flags);
}
+static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf, u8 status)
+{
+ kfree(buf);
+}
+
struct mlx5_teardown_stream_context {
struct mlx5_fpga_tls_command_context cmd;
u32 swid;
@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else
+ else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->idr_spinlock,
+ &fdev->tls->tx_idr_spinlock,
+ ctx->swid);
+ else
+ mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
+ &fdev->tls->rx_idr_spinlock,
ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx));
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
+ void *flow;
+ void *cmd;
+ int ret;
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (buf + 1);
+
+ rcu_read_lock();
+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+ rcu_read_unlock();
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+ MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
+ MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
+ MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
+
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+ buf->complete = mlx_tls_kfree_complete;
+
+ ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+
+ return ret;
+}
+
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
mlx5_fpga_tls_teardown_completion);
}
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags)
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
rcu_read_lock();
- flow = idr_find(&tls->tx_idr, swid);
+ if (direction_sx)
+ flow = idr_find(&tls->tx_idr, swid);
+ else
+ flow = idr_find(&tls->rx_idr, swid);
+
rcu_read_unlock();
if (!flow) {
@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
* the command context because we might not have received
* the tx completion yet.
*/
- mlx5_fpga_tls_del_tx_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC);
+ mlx5_fpga_tls_del_flow(fdev->mdev,
+ MLX5_GET(tls_cmd, tls_cmd, swid),
+ GFP_ATOMIC,
+ MLX5_GET(tls_cmd, tls_cmd,
+ direction_sx));
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
if (err)
goto error;
- if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
- MLX5_ACCEL_TLS_AES_GCM128))) {
+ if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
err = -ENOTSUPP;
goto error;
}
@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&tls->pending_cmds);
idr_init(&tls->tx_idr);
- spin_lock_init(&tls->idr_spinlock);
+ idr_init(&tls->rx_idr);
+ spin_lock_init(&tls->tx_idr_spinlock);
+ spin_lock_init(&tls->rx_idr_spinlock);
fdev->tls = tls;
return 0;
@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
return 0;
}
-static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info, u32 swid,
- u32 tcp_sn)
+static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 swid, u32 tcp_sn)
{
u32 caps = mlx5_fpga_tls_device_caps(mdev);
struct mlx5_setup_stream_context *ctx;
@@ -533,30 +586,42 @@ out:
return ret;
}
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
int ret = -ENOMEM;
u32 swid;
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+ if (direction_sx)
+ ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, flow);
+ else
+ ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, flow);
+
if (ret < 0)
return ret;
swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, 1);
+ MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
- ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
+ ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+ start_offload_tcp_sn);
if (ret && ret != -EINTR)
goto free_swid;
*p_swid = swid;
return 0;
free_swid:
- mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+ if (direction_sx)
+ mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, swid);
+ else
+ mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, swid);
return ret;
}
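Editor's note: after the rename, one pair of entry points serves both directions; direction_sx selects the TX (true) or RX (false) IDR and is also written into the flow context. A hedged caller sketch using only names from this file, with error handling abbreviated:

static int demo_rx_flow(struct mlx5_core_dev *mdev, void *flow,
			struct tls_crypto_info *info, u32 tcp_sn)
{
	u32 swid;
	int err;

	err = mlx5_fpga_tls_add_flow(mdev, flow, info, tcp_sn, &swid,
				     false);	/* false = RX direction */
	if (err)
		return err;

	mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, false);
	return 0;
}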
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
index 800a214e4e49..3b2e37bf76fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -46,15 +46,18 @@ struct mlx5_fpga_tls {
struct mlx5_fpga_conn *conn;
struct idr tx_idr;
- spinlock_t idr_spinlock; /* protects the IDR */
+ struct idr rx_idr;
+ spinlock_t tx_idr_spinlock; /* protects the IDR */
+ spinlock_t rx_idr_spinlock; /* protects the IDR */
};
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags);
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx);
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps;
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
+
#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 5a00deff5457..6a62b84e57f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -349,9 +349,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6ddb2565884d..261cb6aacf12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1465,7 +1465,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_DECAP |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
return false;
@@ -1824,7 +1826,7 @@ search_again_locked:
g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
- rule = (void *)g;
+ rule = ERR_CAST(g);
up_write_ref_node(&ft->node);
return rule;
}
@@ -1874,7 +1876,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num)
+ int num_dest)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_destination gen_dest = {};
@@ -1887,7 +1889,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
- if (dest_num)
+ if (num_dest)
return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio);
@@ -1895,7 +1897,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
gen_dest.ft = next_ft;
dest = &gen_dest;
- dest_num = 1;
+ num_dest = 1;
flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else {
mutex_unlock(&root->chain_lock);
@@ -1903,7 +1905,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
}
}
- handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(handle) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index a8eecedd46c2..02e2e4575e4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,8 +33,15 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
+{
+ return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
@@ -48,4 +55,21 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(nsec);
}
+#else
+static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}
+
+static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
+{
+ return -1;
+}
+
+static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ return 0;
+}
+#endif
+
#endif
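Editor's note: the IS_ENABLED split gives every entry point a no-op stub, so callers need no CONFIG_PTP_1588_CLOCK guards of their own; mlx5_clock_get_ptp_index() reports -1 when PTP is compiled out, the same value it returns when no clock was registered. Illustrative caller:

/* Works identically with or without CONFIG_PTP_1588_CLOCK: */
static void demo_report_ptp(struct mlx5_core_dev *mdev)
{
	int idx = mlx5_clock_get_ptp_index(mdev);

	if (idx < 0)
		pr_info("no PTP clock available\n");
	else
		pr_info("PTP clock index: %d\n", idx);
}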
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
new file mode 100644
index 000000000000..9a8fd762167b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "vxlan.h"
+
+struct mlx5_vxlan {
+ struct mlx5_core_dev *mdev;
+ spinlock_t lock; /* protect vxlan table */
+	/* max_num_ports is usually 4; 16 hash buckets are more than enough */
+ DECLARE_HASHTABLE(htable, 4);
+ int num_ports;
+ struct mutex sync_lock; /* sync add/del port HW operations */
+};
+
+struct mlx5_vxlan_port {
+ struct hlist_node hlist;
+ atomic_t refcount;
+ u16 udp_port;
+};
+
+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
+static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+ MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
+ if (vxlanp->udp_port == port)
+ return vxlanp;
+ }
+
+ return NULL;
+}
+
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return NULL;
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ return vxlanp;
+}
+
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ int ret = -ENOSPC;
+
+ vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
+ if (vxlanp) {
+ atomic_inc(&vxlanp->refcount);
+ return 0;
+ }
+
+ mutex_lock(&vxlan->sync_lock);
+ if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
+ mlx5_core_info(vxlan->mdev,
+ "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
+ port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
+ if (ret)
+ goto unlock;
+
+ vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
+ if (!vxlanp) {
+ ret = -ENOMEM;
+ goto err_delete_port;
+ }
+
+ vxlanp->udp_port = port;
+ atomic_set(&vxlanp->refcount, 1);
+
+ spin_lock_bh(&vxlan->lock);
+ hash_add(vxlan->htable, &vxlanp->hlist, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ vxlan->num_ports++;
+ mutex_unlock(&vxlan->sync_lock);
+ return 0;
+
+err_delete_port:
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+
+unlock:
+ mutex_unlock(&vxlan->sync_lock);
+ return ret;
+}
+
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ bool remove = false;
+ int ret = 0;
+
+ mutex_lock(&vxlan->sync_lock);
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ if (!vxlanp) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (atomic_dec_and_test(&vxlanp->refcount)) {
+ hash_del(&vxlanp->hlist);
+ remove = true;
+ }
+
+out_unlock:
+ spin_unlock_bh(&vxlan->lock);
+
+ if (remove) {
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+ kfree(vxlanp);
+ vxlan->num_ports--;
+ }
+
+ mutex_unlock(&vxlan->sync_lock);
+
+ return ret;
+}
+
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_vxlan *vxlan;
+
+ if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
+ return ERR_PTR(-ENOTSUPP);
+
+ vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+ if (!vxlan)
+ return ERR_PTR(-ENOMEM);
+
+ vxlan->mdev = mdev;
+ mutex_init(&vxlan->sync_lock);
+ spin_lock_init(&vxlan->lock);
+ hash_init(vxlan->htable);
+
+ /* Hardware adds 4789 by default */
+ mlx5_vxlan_add_port(vxlan, 4789);
+
+ return vxlan;
+}
+
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ struct hlist_node *tmp;
+ int bkt;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+	/* Lockless since we are the only hash table consumers */
+ hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
+ hash_del(&vxlanp->hlist);
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
+ kfree(vxlanp);
+ }
+
+ kfree(vxlan);
+}
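Editor's note: taken together, the new file exposes a small refcounted port table. A hedged lifecycle sketch using only the functions defined above (the port number is an example; 4789 is added by mlx5_vxlan_create itself):

static int demo_vxlan(struct mlx5_core_dev *mdev)
{
	struct mlx5_vxlan *vxlan = mlx5_vxlan_create(mdev);
	int err;

	if (!mlx5_vxlan_allowed(vxlan))
		return 0;	/* offload unsupported: create returned ERR_PTR */

	err = mlx5_vxlan_add_port(vxlan, 8472);	/* legacy Linux VXLAN port */
	if (!err)
		mlx5_vxlan_del_port(vxlan, 8472);

	mlx5_vxlan_destroy(vxlan);
	return err;
}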
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index 5ef6ae7d568a..8fb0eb08fa6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -33,31 +33,32 @@
#define __MLX5_VXLAN_H__
#include <linux/mlx5/driver.h>
-#include "en.h"
-struct mlx5e_vxlan {
- atomic_t refcount;
- u16 udp_port;
-};
+struct mlx5_vxlan;
+struct mlx5_vxlan_port;
-struct mlx5e_vxlan_work {
- struct work_struct work;
- struct mlx5e_priv *priv;
- sa_family_t sa_family;
- u16 port;
-};
-
-static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
{
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
- mlx5_core_is_pf(mdev));
+	/* The "not allowed" reason is encoded as an error pointer
+	 * returned by mlx5_vxlan_create
+	 */
+ return !IS_ERR_OR_NULL(vxlan);
}
-void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+#if IS_ENABLED(CONFIG_VXLAN)
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+#else
+static inline struct mlx5_vxlan*
+mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+static inline struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; }
+#endif
#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 615005e63819..cf3e4a659052 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,9 +62,11 @@
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
+#include "lib/vxlan.h"
+#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
@@ -321,7 +323,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = &priv->eq_table;
- int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int err;
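Editor's note: the new max_num_eqs capability takes precedence when a device reports it; older firmware only exposes log_max_eq, hence the fallback. The same logic condensed (the GNU "?:" form is common in this codebase):

/* max_num_eqs is authoritative when nonzero; else derive from log2 */
num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?: 1 << MLX5_CAP_GEN(dev, log_max_eq);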
@@ -960,6 +964,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_clock(dev);
+ dev->vxlan = mlx5_vxlan_create(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -990,6 +996,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_sriov_cleanup;
}
+ dev->tracer = mlx5_fw_tracer_create(dev);
+
return 0;
err_sriov_cleanup:
@@ -1001,6 +1009,7 @@ err_mpfs_cleanup:
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1015,11 +1024,13 @@ out:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_sriov_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
@@ -1167,10 +1178,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_put_uars;
}
+ err = mlx5_fw_tracer_init(dev->tracer);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init FW tracer\n");
+ goto err_fw_tracer;
+ }
+
err = alloc_comp_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
- goto err_stop_eqs;
+ goto err_comp_eqs;
}
err = mlx5_irq_set_affinity_hints(dev);
@@ -1252,7 +1269,10 @@ err_fpga_start:
err_affinity_hints:
free_comp_eqs(dev);
-err_stop_eqs:
+err_comp_eqs:
+ mlx5_fw_tracer_cleanup(dev->tracer);
+
+err_fw_tracer:
mlx5_stop_eqs(dev);
err_put_uars:
@@ -1320,6 +1340,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
+ mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_free_irq_vectors(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 023882d9a22e..b4134fa0bba3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -66,6 +66,12 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_err_rl(__dev, format, ...) \
+ dev_err_ratelimited(&(__dev)->pdev->dev, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
#define mlx5_core_warn(__dev, format, ...) \
dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
@@ -93,7 +99,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
-void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index f4f02f775c93..0670165afd5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -146,23 +146,6 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
- u32 *mkey)
-{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
- int err;
-
- MLX5_SET(query_special_contexts_in, in, opcode,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *mkey = MLX5_GET(query_special_contexts_out, out,
- dump_fill_mkey);
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
-
static inline u32 mlx5_get_psv(u32 *out, int psv_index)
{
switch (psv_index) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 7eecd5b07bb1..b02af317c125 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -62,17 +62,6 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
return MLX5_GET(query_vport_state_out, out, state);
}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
-
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
- _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
-
- return MLX5_GET(query_vport_state_out, out, admin_state);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
@@ -90,7 +79,6 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
deleted file mode 100644
index 2f74953e4561..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include "vxlan.h"
-
-void mlx5e_vxlan_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-
- spin_lock_init(&vxlan_db->lock);
- INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
-}
-
-static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
- MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
- MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
-
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock_bh(&vxlan_db->lock);
-
- return vxlan;
-}
-
-static void mlx5e_vxlan_add_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- int err;
-
- mutex_lock(&priv->state_lock);
- vxlan = mlx5e_vxlan_lookup_port(priv, port);
- if (vxlan) {
- atomic_inc(&vxlan->refcount);
- goto free_work;
- }
-
- if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
- goto free_work;
-
- vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
- if (!vxlan)
- goto err_delete_port;
-
- vxlan->udp_port = port;
- atomic_set(&vxlan->refcount, 1);
-
- spin_lock_bh(&vxlan_db->lock);
- err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_bh(&vxlan_db->lock);
- if (err)
- goto err_free;
-
- goto free_work;
-
-err_free:
- kfree(vxlan);
-err_delete_port:
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-free_work:
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- bool remove = false;
-
- mutex_lock(&priv->state_lock);
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- if (!vxlan)
- goto out_unlock;
-
- if (atomic_dec_and_test(&vxlan->refcount)) {
- radix_tree_delete(&vxlan_db->tree, port);
- remove = true;
- }
-
-out_unlock:
- spin_unlock_bh(&vxlan_db->lock);
-
- if (remove) {
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add)
-{
- struct mlx5e_vxlan_work *vxlan_work;
-
- vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
- if (!vxlan_work)
- return;
-
- if (add)
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
- else
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
-
- vxlan_work->priv = priv;
- vxlan_work->port = port;
- vxlan_work->sa_family = sa_family;
- queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
- unsigned int port = 0;
-
- /* Lockless since we are the only radix-tree consumers, wq is disabled */
- while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
- port = vxlan->udp_port;
- radix_tree_delete(&vxlan_db->tree, port);
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 0b47126815b6..2bd4c3184eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
return !wq->cur_sz;
}
+static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq)
+{
+ return wq->fbc.sz_m1 - wq->cur_sz;
+}
+
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 82827a8d3d67..8a291eb36c64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
depends on IPV6 || IPV6=n
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
+ select GENERIC_ALLOCATOR
select PARMAN
select MLXFW
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0cadcabfe86f..68fa44a41485 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,11 +15,18 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_fid.o \
- spectrum_ipip.o spectrum_acl_flex_actions.o \
- spectrum_mr.o spectrum_mr_tcam.o \
+ spectrum1_kvdl.o spectrum2_kvdl.o \
+ spectrum_kvdl.o \
+ spectrum_acl_tcam.o spectrum_acl_ctcam.o \
+ spectrum_acl_atcam.o spectrum_acl_erp.o \
+ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
+ spectrum_acl.o \
+ spectrum_flower.o spectrum_cnt.o \
+ spectrum_fid.o spectrum_ipip.o \
+ spectrum_acl_flex_actions.o \
+ spectrum_acl_flex_keys.o \
+ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
+ spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 2bc48054b685..0772e4339b33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/cmd.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CMD_H
#define _MLXSW_CMD_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f9c724752a32..81533d7f395c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -759,15 +726,6 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
-static void mlxsw_core_driver_put(const char *kind)
-{
- struct mlxsw_driver *mlxsw_driver;
-
- spin_lock(&mlxsw_core_driver_list_lock);
- mlxsw_driver = __driver_find(kind);
- spin_unlock(&mlxsw_core_driver_list_lock);
-}
-
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count,
@@ -1115,7 +1073,6 @@ err_bus_init:
if (!reload)
devlink_free(devlink);
err_devlink_alloc:
- mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
@@ -1123,7 +1080,6 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
bool reload)
{
- const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (mlxsw_core->reload_fail)
@@ -1144,7 +1100,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail:
devlink_free(devlink);
- mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 552cfa29c2f7..655ddd204ab2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_H
#define _MLXSW_CORE_H
@@ -362,6 +329,7 @@ struct mlxsw_fw_rev {
u16 major;
u16 minor;
u16 subminor;
+ u16 can_reset_minor;
};
struct mlxsw_bus_info {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index f6f6a568d66a..c51b2adfc1e1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -355,9 +324,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block->first_set = mlxsw_afa_set_create(true);
if (!block->first_set)
goto err_first_set_create;
- block->cur_set = block->first_set;
+
+ /* In case the user asked for a dummy first set, we leave it
+ * empty here and create another, real, set right away.
+ */
+ if (mlxsw_afa->ops->dummy_first_set) {
+ block->cur_set = mlxsw_afa_set_create(false);
+ if (!block->cur_set)
+ goto err_second_set_create;
+ block->cur_set->prev = block->first_set;
+ block->first_set->next = block->cur_set;
+ } else {
+ block->cur_set = block->first_set;
+ }
+
return block;
+err_second_set_create:
+ mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
return NULL;
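
The hunk above optionally inserts an empty head set before the first real one. A minimal userspace sketch of that chaining, with illustrative struct and function names rather than the mlxsw ones, in case the control flow is easier to read outside the diff:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct set {
	struct set *next;
	struct set *prev;
};

struct block {
	struct set *first_set;
	struct set *cur_set;
};

static struct block *block_create(bool dummy_first_set)
{
	struct block *block = calloc(1, sizeof(*block));

	if (!block)
		return NULL;
	block->first_set = calloc(1, sizeof(*block->first_set));
	if (!block->first_set)
		goto err_first_set;

	if (dummy_first_set) {
		/* Leave the first set empty; chain a real one behind it. */
		block->cur_set = calloc(1, sizeof(*block->cur_set));
		if (!block->cur_set)
			goto err_second_set;
		block->cur_set->prev = block->first_set;
		block->first_set->next = block->cur_set;
	} else {
		block->cur_set = block->first_set;
	}
	return block;

err_second_set:
	free(block->first_set);
err_first_set:
	free(block);
	return NULL;
}

int main(void)
{
	struct block *block = block_create(true);

	printf("first set has a successor: %s\n",
	       block && block->first_set->next ? "yes" : "no");
	return 0;
}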
@@ -419,11 +403,31 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
{
- return block->first_set->kvdl_index;
+ return block->cur_set->ht_key.enc_actions;
}
-EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+EXPORT_SYMBOL(mlxsw_afa_block_cur_set);
+
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
+{
+ /* The first set is never in KVD linear. So the first set
+ * with a valid KVD linear index is always the second one.
+ */
+ if (WARN_ON(!block->first_set->next))
+ return 0;
+ return block->first_set->next->kvdl_index;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
+
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
+{
+ u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
+
+ return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
+ kvdl_index, activity);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_activity_get);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
@@ -724,14 +728,17 @@ mlxsw_afa_vlan_pack(char *payload,
}
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
- u16 vid, u8 pcp, u8 et)
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VLAN_CODE,
MLXSW_AFA_VLAN_SIZE);
- if (IS_ERR(act))
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
return PTR_ERR(act);
+ }
mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
@@ -925,19 +932,23 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
int
mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
- const struct net_device *out_dev, bool ingress)
+ const struct net_device *out_dev, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_mirror *mirror;
int err;
mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
ingress);
- if (IS_ERR(mirror))
+ if (IS_ERR(mirror)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action");
return PTR_ERR(mirror);
-
+ }
err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action");
goto err_append_allocated_mirror;
+ }
return 0;
@@ -987,23 +998,29 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
}
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port)
+ u8 local_port, bool in_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
u32 kvdl_index;
char *act;
int err;
- if (in_port)
+ if (in_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported");
return -EOPNOTSUPP;
+ }
fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
- if (IS_ERR(fwd_entry_ref))
+ if (IS_ERR(fwd_entry_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action");
return PTR_ERR(fwd_entry_ref);
+ }
kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
MLXSW_AFA_FORWARD_SIZE);
if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
err = PTR_ERR(act);
goto err_append_action;
}
@@ -1068,21 +1085,25 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 *p_counter_index)
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_counter *counter;
u32 counter_index;
int err;
counter = mlxsw_afa_counter_create(block);
- if (IS_ERR(counter))
+ if (IS_ERR(counter)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create count action");
return PTR_ERR(counter);
+ }
counter_index = counter->counter_index;
err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append count action");
goto err_append_allocated_counter;
-
+ }
if (p_counter_index)
*p_counter_index = counter_index;
return 0;
@@ -1125,13 +1146,16 @@ static inline void mlxsw_afa_virfwd_pack(char *payload,
mlxsw_afa_virfwd_fid_set(payload, fid);
}
-int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VIRFWD_CODE,
MLXSW_AFA_VIRFWD_SIZE);
- if (IS_ERR(act))
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
return PTR_ERR(act);
+ }
mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
return 0;
}
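
Throughout this file the append helpers gain a struct netlink_ext_ack parameter, so a failed ACL action append can report a human-readable message to the caller instead of a bare errno. A minimal userspace sketch of the same report-and-return pattern; the names here are hypothetical stand-ins for struct netlink_ext_ack and NL_SET_ERR_MSG_MOD():

#include <errno.h>
#include <stdio.h>

struct ext_ack {
	const char *msg;	/* last error message, if any */
};

static int append_action(size_t len, size_t used, size_t need,
			 struct ext_ack *extack)
{
	if (used + need > len) {
		/* Record a message for the caller, then return the errno. */
		extack->msg = "Cannot append action";
		return -ENOBUFS;
	}
	return 0;
}

int main(void)
{
	struct ext_ack extack = { NULL };

	if (append_action(16, 12, 8, &extack) < 0)
		fprintf(stderr, "error: %s\n", extack.msg);
	return 0;
}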
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 3a155d104384..0e3a59dda12e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H
#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
@@ -45,6 +14,8 @@ struct mlxsw_afa_ops {
int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first);
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+ int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
+ bool *activity);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
@@ -54,6 +25,7 @@ struct mlxsw_afa_ops {
bool ingress, int *p_span_id);
void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
+ bool dummy_first_set;
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -64,7 +36,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
@@ -75,16 +49,21 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
u8 local_in_port,
const struct net_device *out_dev,
- bool ingress);
+ bool ingress,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port);
+ u8 local_port, bool in_port,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
- u16 vid, u8 pcp, u8 et);
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 *p_counter_index);
-int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index b32a00972e83..785bf01fe2be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -43,6 +12,7 @@
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
+ const struct mlxsw_afk_ops *ops;
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
};
@@ -69,8 +39,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
}
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count)
+ const struct mlxsw_afk_ops *ops)
{
struct mlxsw_afk *mlxsw_afk;
@@ -79,8 +48,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
return NULL;
INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
mlxsw_afk->max_blocks = max_blocks;
- mlxsw_afk->blocks = blocks;
- mlxsw_afk->blocks_count = blocks_count;
+ mlxsw_afk->ops = ops;
+ mlxsw_afk->blocks = ops->blocks;
+ mlxsw_afk->blocks_count = ops->blocks_count;
WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
return mlxsw_afk;
}
@@ -415,61 +385,76 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
}
EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
-static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
- __mlxsw_item_set32(output_indexed, output_item, 0, value);
+ __mlxsw_item_set32(output, output_item, 0, value);
}
-static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
- char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+ char *output_data = __mlxsw_item_data(output, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
-#define MLXSW_AFK_KEY_BLOCK_SIZE 16
-
-static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
- int block_index, char *storage, char *output)
+static void
+mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+ char *output, char *storage)
{
- char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
- mlxsw_afk_encode_u32(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_u32(storage_item, output_item,
+ storage, output);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
- mlxsw_afk_encode_buf(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_buf(storage_item, output_item,
+ storage, output);
}
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16
+
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask)
+ char *key, char *mask, int block_start, int block_end)
{
+ char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
+ char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
- int block_index;
+ int block_index, i;
+
+ for (i = block_start; i <= block_end; i++) {
+ memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+ memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+
+ mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+ elinst = mlxsw_afk_key_info_elinst_get(key_info,
+ element,
+ &block_index);
+ if (!elinst || block_index != i)
+ continue;
+
+ mlxsw_sp_afk_encode_one(elinst, block_key,
+ values->storage.key);
+ mlxsw_sp_afk_encode_one(elinst, block_mask,
+ values->storage.mask);
+ }
- mlxsw_afk_element_usage_for_each(element, &values->elusage) {
- elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
- &block_index);
- if (!elinst)
- continue;
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.key, key);
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.mask, mask);
+ if (key)
+ mlxsw_afk->ops->encode_block(block_key, i, key);
+ if (mask)
+ mlxsw_afk->ops->encode_block(block_mask, i, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
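
The rewritten mlxsw_afk_encode() no longer writes each block at a fixed 16-byte offset; it builds every block in scratch buffers and delegates placement to the new per-ASIC encode_block callback. A self-contained sketch of that loop, assuming a linear-placement callback comparable to what the old code hardwired:

#include <stdio.h>
#include <string.h>

#define BLOCK_MAX_SIZE 16

typedef void (*encode_block_cb)(char *block, int block_index, char *output);

/* Stand-in for a Spectrum-style encode_block(): block i lands at a
 * fixed 16-byte offset, which is what the old inline code assumed. */
static void encode_block_linear(char *block, int block_index, char *output)
{
	memcpy(output + block_index * BLOCK_MAX_SIZE, block, BLOCK_MAX_SIZE);
}

static void encode(encode_block_cb encode_block, char *key,
		   int block_start, int block_end)
{
	char block_key[BLOCK_MAX_SIZE];
	int i;

	for (i = block_start; i <= block_end; i++) {
		memset(block_key, 0, sizeof(block_key));
		block_key[0] = (char)i;	/* stand-in for element encoding */
		if (key)
			encode_block(block_key, i, key);
	}
}

int main(void)
{
	char key[4 * BLOCK_MAX_SIZE] = { 0 };

	encode(encode_block_linear, key, 0, 3);
	printf("first byte of block 2: %d\n", key[2 * BLOCK_MAX_SIZE]);
	return 0;
}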
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 122506daa586..c29c045d826d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
#define _MLXSW_CORE_ACL_FLEX_KEYS_H
@@ -42,16 +11,20 @@
enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -99,9 +72,11 @@ struct mlxsw_afk_element_info {
* define an internal storage geometry.
*/
static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
@@ -112,12 +87,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
};
#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
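
The wide elements are split into chunks of at most 32 bits (a 48-bit MAC becomes a 16-bit high part plus a 32-bit low part; a 128-bit IPv6 address becomes four 32-bit parts), which lets each element map onto the flex-key blocks more evenly. A small sketch of the MAC split implied by the DMAC_32_47/DMAC_0_31 layout; the helper is ours, not a kernel API:

#include <stdint.h>
#include <stdio.h>

static void mac_split(const uint8_t mac[6], uint16_t *hi16, uint32_t *lo32)
{
	/* Bits 32..47 are the two most significant bytes of the address. */
	*hi16 = (uint16_t)((uint16_t)mac[0] << 8 | mac[1]);
	*lo32 = (uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
		(uint32_t)mac[4] << 8 | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t hi;
	uint32_t lo;

	mac_split(mac, &hi, &lo);
	printf("DMAC_32_47=0x%04x DMAC_0_31=0x%08x\n", hi, (unsigned int)lo);
	return 0;
}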
@@ -208,9 +185,14 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
struct mlxsw_afk;
+struct mlxsw_afk_ops {
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+ void (*encode_block)(char *block, int block_index, char *output);
+};
+
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count);
+ const struct mlxsw_afk_ops *ops);
void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
struct mlxsw_afk_key_info;
@@ -243,8 +225,9 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
enum mlxsw_afk_element element,
const char *key_value, const char *mask_value,
unsigned int len);
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask);
+ char *key, char *mask, int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 84185f8dfbae..f6cf2896d337 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d866c98c1a97..6d29dc428608 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -1,34 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved
* Copyright (c) 2016 Ivan Vecera <cera@cera.cz>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index 97b6bb5d9185..a33b896f4bb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/emad.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_EMAD_H
#define _MLXSW_EMAD_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 25f9915ebd82..798bd5aca384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/i2c.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/err.h>
#include <linux/i2c.h>
@@ -46,8 +15,6 @@
#include "core.h"
#include "i2c.h"
-static const char mlxsw_i2c_driver_name[] = "mlxsw_i2c";
-
#define MLXSW_I2C_CIR2_BASE 0x72000
#define MLXSW_I2C_CIR_STATUS_OFF 0x18
#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
index daa24b213ea4..17e059d47fae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/i2c.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_I2C_H
#define _MLXSW_I2C_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h
index ce313aaa6336..2d0cb0f5eb85 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/ib.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/ib.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/ib.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_IB_H
#define _MLXSW_IB_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 31c886edc791..e92cadc98128 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/item.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 3dd16267b76c..5a6c4457fb55 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/minimal.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/i2c.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index fc4557245ff4..4d271fb3de3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -53,8 +22,6 @@
#include "port.h"
#include "resources.h"
-static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -1750,6 +1717,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
struct mlxsw_pci *mlxsw_pci;
+ bool called_again = false;
int err;
mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
@@ -1806,10 +1774,18 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.dev = &pdev->dev;
mlxsw_pci->id = id;
+again:
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
NULL);
- if (err) {
+ /* -EAGAIN is returned in case the FW was updated. FW needs
+ * a reset, so let's try to call mlxsw_core_bus_device_register()
+ * again.
+ */
+ if (err == -EAGAIN && !called_again) {
+ called_again = true;
+ goto again;
+ } else if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
}
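
A sketch of the retry-once pattern the probe path adopts above: exactly one -EAGAIN (firmware was flashed and the device reset) re-enters registration, while any other error, or a second -EAGAIN, still fails the probe. register_device() is a hypothetical stand-in for mlxsw_core_bus_device_register():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int register_device(int *attempts)
{
	/* The first call simulates a firmware update requiring a reset. */
	return (*attempts)++ == 0 ? -EAGAIN : 0;
}

static int probe(void)
{
	bool called_again = false;
	int attempts = 0;
	int err;

again:
	err = register_device(&attempts);
	if (err == -EAGAIN && !called_again) {
		called_again = true;
		goto again;
	} else if (err) {
		fprintf(stderr, "cannot register bus device\n");
		return err;
	}
	return 0;
}

int main(void)
{
	return probe() ? 1 : 0;
}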
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index d65582325cd5..946339e13eb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PCI_H
#define _MLXSW_PCI_H
@@ -39,6 +8,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 963155f6a17a..83f452b7ccbb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
- * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PCI_HW_H
#define _MLXSW_PCI_HW_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index c580abba8d34..a33eeef0b00c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -1,38 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/port.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_PORT_H
#define _MLXSW_PORT_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1877d9f8a11a..6e8b619b769b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,44 +1,10 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_REG_H
#define _MLXSW_REG_H
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
@@ -1943,6 +1909,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
}
+/* PGCR - Policy-Engine General Configuration Register
+ * ---------------------------------------------------
+ * This register configures general Policy-Engine settings.
+ */
+#define MLXSW_REG_PGCR_ID 0x3001
+#define MLXSW_REG_PGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN);
+
+/* reg_pgcr_default_action_pointer_base
+ * Default action pointer base. Each region has a default action pointer
+ * which is equal to default_action_pointer_base + region_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base)
+{
+ MLXSW_REG_ZERO(pgcr, payload);
+ mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base);
+}
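/* Illustrative sketch, not part of this patch: the usual mlxsw call
 * pattern for emitting PGCR. The mlxsw_example_* helper name is
 * hypothetical; mlxsw_reg_write() and MLXSW_REG() are the existing
 * core.h interfaces.
 */
static inline int mlxsw_example_pgcr_write(struct mlxsw_core *core,
					   u32 pointer_base)
{
	char pgcr_pl[MLXSW_REG_PGCR_LEN];

	mlxsw_reg_pgcr_pack(pgcr_pl, pointer_base);
	return mlxsw_reg_write(core, MLXSW_REG(pgcr), pgcr_pl);
}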
+
/* PPBT - Policy-Engine Port Binding Table
* ---------------------------------------
* This register is used for configuration of the Port Binding Table.
@@ -2132,14 +2120,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
/* reg_ptar_action_set_type
* Type of action set to be used on this region.
- * For Spectrum, this is always type 2 - "flexible"
+ * For Spectrum and Spectrum-2, this is always type 2 - "flexible"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+enum mlxsw_reg_ptar_key_type {
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x51, /* Spectrum */
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */
+};
+
/* reg_ptar_key_type
* TCAM key type for the region.
- * For Spectrum, this is always type 0x50 - "FLEX_KEY"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
@@ -2182,13 +2174,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+ enum mlxsw_reg_ptar_key_type key_type,
u16 region_size, u16 region_id,
const char *tcam_region_info)
{
MLXSW_REG_ZERO(ptar, payload);
mlxsw_reg_ptar_op_set(payload, op);
mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
- mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+ mlxsw_reg_ptar_key_type_set(payload, key_type);
mlxsw_reg_ptar_region_size_set(payload, region_size);
mlxsw_reg_ptar_region_id_set(payload, region_id);
mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
@@ -2327,6 +2320,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
*/
MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+/* reg_pefa_a
+ * Activity
+ * For a new entry: set if ca=0, clear if ca=1
+ * Set if a packet lookup has hit on the specific entry
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1);
+
+/* reg_pefa_ca
+ * Clear activity
+ * On write: the activity bit is set according to this field
+ * On read: the activity bit is cleared after the read, according to ca
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1);
+
#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
/* reg_pefa_flex_action_set
@@ -2336,12 +2346,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
*/
MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
-static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca,
const char *flex_action_set)
{
MLXSW_REG_ZERO(pefa, payload);
mlxsw_reg_pefa_index_set(payload, index);
- mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+ mlxsw_reg_pefa_ca_set(payload, ca);
+ if (flex_action_set)
+ mlxsw_reg_pefa_flex_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
+{
+ *p_a = mlxsw_reg_pefa_a_get(payload);
}
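/* Illustrative sketch, not part of this patch: querying the activity
 * bit of a KVD linear entry via PEFA. The mlxsw_example_* name is
 * hypothetical; mlxsw_reg_query() is the existing core.h read helper.
 * ca=true clears the activity bit once it has been read, and a NULL
 * action set skips rewriting the flex actions.
 */
static inline int mlxsw_example_pefa_activity_get(struct mlxsw_core *core,
						  u32 index, bool *p_active)
{
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	int err;

	mlxsw_reg_pefa_pack(pefa_pl, index, true, NULL);
	err = mlxsw_reg_query(core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		return err;
	mlxsw_reg_pefa_unpack(pefa_pl, p_active);
	return 0;
}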
/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
@@ -2397,6 +2415,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
*/
MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+/* reg_ptce2_priority
+ * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1.
+ * Note: priority does not have to be unique per rule.
+ * Within a region, higher priority should have lower offset (no limitation
+ * between regions in a multi-region).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
+
/* reg_ptce2_tcam_region_info
* Opaque object that represents the TCAM region.
* Access: Index
@@ -2404,14 +2431,14 @@ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
-#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96
/* reg_ptce2_flex_key_blocks
* ACL Key.
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_mask
* mask- in the same size as key. A bit that is set directs the TCAM
@@ -2420,7 +2447,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_flex_action_set
* ACL action set.
@@ -2432,15 +2459,567 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
const char *tcam_region_info,
- u16 offset)
+ u16 offset, u32 priority)
{
MLXSW_REG_ZERO(ptce2, payload);
mlxsw_reg_ptce2_v_set(payload, valid);
mlxsw_reg_ptce2_op_set(payload, op);
mlxsw_reg_ptce2_offset_set(payload, offset);
+ mlxsw_reg_ptce2_priority_set(payload, priority);
mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
+/* PERPT - Policy-Engine ERP Table Register
+ * ----------------------------------------
+ * This register adds and removes eRPs from the eRP table.
+ */
+#define MLXSW_REG_PERPT_ID 0x3021
+#define MLXSW_REG_PERPT_LEN 0x80
+
+MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN);
+
+/* reg_perpt_erpt_bank
+ * eRP table bank.
+ * Range 0 .. cap_max_erp_table_banks - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4);
+
+/* reg_perpt_erpt_index
+ * Index to eRP table within the eRP bank.
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8);
+
+enum mlxsw_reg_perpt_key_size {
+ MLXSW_REG_PERPT_KEY_SIZE_2KB,
+ MLXSW_REG_PERPT_KEY_SIZE_4KB,
+ MLXSW_REG_PERPT_KEY_SIZE_8KB,
+ MLXSW_REG_PERPT_KEY_SIZE_12KB,
+};
+
+/* reg_perpt_key_size
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4);
+
+/* reg_perpt_bf_bypass
+ * 0 - The eRP is used only if bloom filter state is set for the given
+ * rule.
+ * 1 - The eRP is used regardless of bloom filter state.
+ * The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1);
+
+/* reg_perpt_erp_id
+ * eRP ID for use by the rules.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4);
+
+/* reg_perpt_erpt_base_bank
+ * Base eRP table bank, points to head of erp_vector
+ * Range is 0 .. cap_max_erp_table_banks - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4);
+
+/* reg_perpt_erpt_base_index
+ * Base index to eRP table within the eRP bank
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8);
+
+/* reg_perpt_erp_index_in_vector
+ * eRP index in the vector.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4);
+
+/* reg_perpt_erp_vector
+ * eRP vector.
+ * Access: OP
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1);
+
+/* reg_perpt_mask
+ * Mask
+ * 0 - A-TCAM will ignore the bit in key
+ * 1 - A-TCAM will compare the bit in key
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_perpt_erp_vector_set(payload, bit, true);
+}
+
+static inline void
+mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
+ enum mlxsw_reg_perpt_key_size key_size, u8 erp_id,
+ u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index,
+ char *mask)
+{
+ MLXSW_REG_ZERO(perpt, payload);
+ mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
+ mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
+ mlxsw_reg_perpt_key_size_set(payload, key_size);
+ mlxsw_reg_perpt_bf_bypass_set(payload, true);
+ mlxsw_reg_perpt_erp_id_set(payload, erp_id);
+ mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
+ mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
+ mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index);
+ mlxsw_reg_perpt_mask_memcpy_to(payload, mask);
+}
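/* Illustrative sketch, not part of this patch: writing one eRP table
 * entry. All numeric values below are made up for the example; the
 * erp_vector is a regular kernel bitmap whose bit numbers are eRP
 * indexes (not eRP IDs).
 */
static inline int mlxsw_example_perpt_write(struct mlxsw_core *core,
					    char *mask)
{
	DECLARE_BITMAP(erp_vector, 16) = { 0 };
	char perpt_pl[MLXSW_REG_PERPT_LEN];

	__set_bit(3, erp_vector);	/* eRP index 3 is in use */
	mlxsw_reg_perpt_pack(perpt_pl, 0, 3, MLXSW_REG_PERPT_KEY_SIZE_4KB,
			     3, 0, 0, 3, mask);
	mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_vector, 16);
	return mlxsw_reg_write(core, MLXSW_REG(perpt), perpt_pl);
}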
+
+/* PERAR - Policy-Engine Region Association Register
+ * -------------------------------------------------
+ * This register associates a hw region with a region_id. The device
+ * supports changing the association on the fly.
+ */
+#define MLXSW_REG_PERAR_ID 0x3026
+#define MLXSW_REG_PERAR_LEN 0x08
+
+MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN);
+
+/* reg_perar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16);
+
+static inline unsigned int
+mlxsw_reg_perar_hw_regions_needed(unsigned int block_num)
+{
+ return DIV_ROUND_UP(block_num, 4);
+}
+
+/* reg_perar_hw_region
+ * HW Region
+ * Range 0 .. cap_max_regions-1
+ * Default: hw_region = region_id
+ * For an 8 key block region, 2 consecutive regions are used
+ * For a 12 key block region, 3 consecutive regions are used
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16);
+
+static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
+ u16 hw_region)
+{
+ MLXSW_REG_ZERO(perar, payload);
+ mlxsw_reg_perar_region_id_set(payload, region_id);
+ mlxsw_reg_perar_hw_region_set(payload, hw_region);
+}
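/* Illustrative sketch, not part of this patch: binding the first hw
 * region of a multi-region. A 12 key block region needs
 * DIV_ROUND_UP(12, 4) = 3 consecutive hw regions, so a caller would
 * reserve mlxsw_reg_perar_hw_regions_needed() slots beforehand. The
 * mlxsw_example_* name is hypothetical.
 */
static inline int mlxsw_example_perar_map(struct mlxsw_core *core,
					  u16 region_id, u16 hw_region)
{
	char perar_pl[MLXSW_REG_PERAR_LEN];

	mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
	return mlxsw_reg_write(core, MLXSW_REG(perar), perar_pl);
}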
+
+/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3
+ * -----------------------------------------------------
+ * This register is a new version of PTCE-V2 in order to support the
+ * A-TCAM. This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PTCE3_ID 0x3027
+#define MLXSW_REG_PTCE3_LEN 0xF0
+
+MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN);
+
+/* reg_ptce3_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1);
+
+enum mlxsw_reg_ptce3_op {
+ /* Write operation. Used to write a new entry to the table.
+ * All R/W fields are relevant for new entry. Activity bit is set
+ * for new entries. Write with v = 0 will delete the entry. Must
+ * not be used if an entry exists.
+ */
+ MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0,
+ /* Update operation */
+ MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1,
+ /* Read operation */
+ MLXSW_REG_PTCE3_OP_QUERY_READ = 0,
+};
+
+/* reg_ptce3_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3);
+
+/* reg_ptce3_priority
+ * Priority of the rule. Higher values win.
+ * For Spectrum-2 range is 1..cap_kvd_size - 1
+ * Note: Priority does not have to be unique per rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24);
+
+/* reg_ptce3_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptce3_flex2_key_blocks
+ * ACL key. The key must be masked according to eRP (if exists) or
+ * according to master mask.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce3_erp_id
+ * eRP ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4);
+
+/* reg_ptce3_delta_start
+ * Start point of delta_value and delta_mask, in bits. Must not exceed
+ * num_key_blocks * 36 - 8. Reserved when delta_mask = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10);
+
+/* reg_ptce3_delta_mask
+ * Delta mask.
+ * 0 - Ignore relevant bit in delta_value
+ * 1 - Compare relevant bit in delta_value
+ * Delta mask must not be set for reserved fields in the key blocks.
+ * Note: No delta when no eRPs. Thus, for regions with
+ * PERERP.erpt_pointer_valid = 0 the delta mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8);
+
+/* reg_ptce3_delta_value
+ * Delta value.
+ * Bits which are masked by delta_mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8);
+
+/* reg_ptce3_prune_vector
+ * Pruning vector relative to the PERPT.erp_id.
+ * Used for reducing lookups.
+ * 0 - NEED: Do a lookup using the eRP.
+ * 1 - PRUNE: Do not perform a lookup using the eRP.
+ * May be modified by PEAPBL and PEAPBM.
+ * Note: In Spectrum-2, a region of 8 key blocks must be set to either
+ * all 1's or all 0's.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1);
+
+/* reg_ptce3_prune_ctcam
+ * Pruning on C-TCAM. Used for reducing lookups.
+ * 0 - NEED: Do a lookup in the C-TCAM.
+ * 1 - PRUNE: Do not perform a lookup in the C-TCAM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1);
+
+/* reg_ptce3_large_exists
+ * Large entry key ID exists.
+ * Within the region:
+ * 0 - SINGLE: The large_entry_key_id is not currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will be added.
+ * For rule delete: The MSB of the key will be removed.
+ * 1 - NON_SINGLE: The large_entry_key_id is currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will not be added.
+ * For rule delete: The MSB of the key will not be removed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1);
+
+/* reg_ptce3_large_entry_key_id
+ * Large entry key ID.
+ * A key for 12 key blocks rules. Reserved when region has less than 12 key
+ * blocks. Must be different for different keys which have the same common
+ * 6 key blocks (MSB, blocks 6..11) key within a region.
+ * Range is 0..cap_max_pe_large_key_id - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24);
+
+/* reg_ptce3_action_pointer
+ * Pointer to action.
+ * Range is 0..cap_max_kvd_action_sets - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24);
+
+static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce3_op op,
+ u32 priority,
+ const char *tcam_region_info,
+ const char *key, u8 erp_id,
+ bool large_exists, u32 lkey_id,
+ u32 action_pointer)
+{
+ MLXSW_REG_ZERO(ptce3, payload);
+ mlxsw_reg_ptce3_v_set(payload, valid);
+ mlxsw_reg_ptce3_op_set(payload, op);
+ mlxsw_reg_ptce3_priority_set(payload, priority);
+ mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
+ mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
+ mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
+ mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
+ mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
+}
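/* Illustrative sketch, not part of this patch: inserting a valid A-TCAM
 * rule that points at a previously programmed action set. A region of
 * fewer than 12 key blocks has no large entry key ID, hence
 * large_exists=false and lkey_id=0 here. The mlxsw_example_* name is
 * hypothetical.
 */
static inline int
mlxsw_example_ptce3_write(struct mlxsw_core *core,
			  const char *tcam_region_info, const char *key,
			  u32 priority, u8 erp_id, u32 action_pointer)
{
	char ptce3_pl[MLXSW_REG_PTCE3_LEN];

	mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
			     priority, tcam_region_info, key, erp_id,
			     false, 0, action_pointer);
	return mlxsw_reg_write(core, MLXSW_REG(ptce3), ptce3_pl);
}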
+
+/* PERCR - Policy-Engine Region Configuration Register
+ * ---------------------------------------------------
+ * This register configures the region parameters. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERCR_ID 0x302A
+#define MLXSW_REG_PERCR_LEN 0x80
+
+MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN);
+
+/* reg_percr_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16);
+
+/* reg_percr_atcam_ignore_prune
+ * Ignore prune_vector by other A-TCAM rules. Used, e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1);
+
+/* reg_percr_ctcam_ignore_prune
+ * Ignore prune_ctcam by other A-TCAM rules. Used, e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1);
+
+/* reg_percr_bf_bypass
+ * Bloom filter bypass.
+ * 0 - Bloom filter is used (default)
+ * 1 - Bloom filter is bypassed. The bypass is an OR condition of
+ * region_id or eRP. See PERPT.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1);
+
+/* reg_percr_master_mask
+ * Master mask. Logical OR mask of all masks of all rules of a region
+ * (both A-TCAM and C-TCAM). When there are no eRPs
+ * (erpt_pointer_valid = 0), then this provides the mask.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96);
+
+static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
+{
+ MLXSW_REG_ZERO(percr, payload);
+ mlxsw_reg_percr_region_id_set(payload, region_id);
+ mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_bf_bypass_set(payload, true);
+}
+
+/* PERERP - Policy-Engine Region eRP Register
+ * ------------------------------------------
+ * This register configures the region eRP. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERERP_ID 0x302B
+#define MLXSW_REG_PERERP_LEN 0x1C
+
+MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN);
+
+/* reg_pererp_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16);
+
+/* reg_pererp_ctcam_le
+ * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1);
+
+/* reg_pererp_erpt_pointer_valid
+ * erpt_pointer is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1);
+
+/* reg_pererp_erpt_bank_pointer
+ * Pointer to eRP table bank. May be modified at any time.
+ * Range 0..cap_max_erp_table_banks-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4);
+
+/* reg_pererp_erpt_pointer
+ * Pointer to eRP table within the eRP bank. Can be changed for an
+ * existing region.
+ * Range 0..cap_max_erp_table_size-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8);
+
+/* reg_pererp_erpt_vector
+ * Vector of allowed eRP indexes starting from erpt_pointer within the
+ * erpt_bank_pointer. Next entries will be in next bank.
+ * Note that eRP index is used and not eRP ID.
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
+
+/* reg_pererp_master_rp_id
+ * Master RP ID. When there are no eRPs, then this provides the eRP ID
+ * for the lookup. Can be changed for an existing region.
+ * Reserved when erpt_pointer_valid = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);
+
+static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_pererp_erpt_vector_set(payload, bit, true);
+}
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
+ bool ctcam_le, bool erpt_pointer_valid,
+ u8 erpt_bank_pointer, u8 erpt_pointer,
+ u8 master_rp_id)
+{
+ MLXSW_REG_ZERO(pererp, payload);
+ mlxsw_reg_pererp_region_id_set(payload, region_id);
+ mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le);
+ mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid);
+ mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer);
+ mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer);
+ mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
+}
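/* Illustrative sketch, not part of this patch: a region that has no eRP
 * table yet does lookups through the master RP, so erpt_pointer_valid
 * is false and only master_rp_id is meaningful. The mlxsw_example_*
 * name is hypothetical.
 */
static inline int mlxsw_example_pererp_no_erps(struct mlxsw_core *core,
					       u16 region_id, u8 master_rp_id)
{
	char pererp_pl[MLXSW_REG_PERERP_LEN];

	mlxsw_reg_pererp_pack(pererp_pl, region_id, false, false, 0, 0,
			      master_rp_id);
	return mlxsw_reg_write(core, MLXSW_REG(pererp), pererp_pl);
}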
+
+/* IEDR - Infrastructure Entry Delete Register
+ * -------------------------------------------
+ * This register is used for deleting entries from the entry tables.
+ * It is legitimate to attempt to delete a non-existent entry (the
+ * device will respond as a good flow).
+ */
+#define MLXSW_REG_IEDR_ID 0x3804
+#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_IEDR_REC_MAX_COUNT 64
+#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \
+ MLXSW_REG_IEDR_REC_LEN * \
+ MLXSW_REG_IEDR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN);
+
+/* reg_iedr_num_rec
+ * Number of records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8);
+
+/* reg_iedr_rec_type
+ * Resource type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_size
+ * Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_index_start
+ * Resource index start.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24,
+ MLXSW_REG_IEDR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_iedr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(iedr, payload);
+}
+
+static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index,
+ u8 rec_type, u16 rec_size,
+ u32 rec_index_start)
+{
+ u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type);
+ mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size);
+ mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start);
+}
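/* Illustrative sketch, not part of this patch: deleting one run of
 * entries. Up to MLXSW_REG_IEDR_REC_MAX_COUNT records can be packed
 * into a single write; num_rec is maintained by the rec_pack helper.
 * The mlxsw_example_* name is hypothetical.
 */
static inline int mlxsw_example_iedr_delete(struct mlxsw_core *core,
					    u8 rec_type, u16 rec_size,
					    u32 rec_index_start)
{
	char iedr_pl[MLXSW_REG_IEDR_LEN];

	mlxsw_reg_iedr_pack(iedr_pl);
	mlxsw_reg_iedr_rec_pack(iedr_pl, 0, rec_type, rec_size,
				rec_index_start);
	return mlxsw_reg_write(core, MLXSW_REG(iedr), iedr_pl);
}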
+
+/* QPTS - QoS Priority Trust State Register
+ * ----------------------------------------
+ * This register controls the port policy to calculate the switch priority and
+ * packet color based on incoming packet fields.
+ */
+#define MLXSW_REG_QPTS_ID 0x4002
+#define MLXSW_REG_QPTS_LEN 0x8
+
+MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
+
+/* reg_qpts_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qpts_trust_state {
+ MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */
+};
+
+/* reg_qpts_trust_state
+ * Trust state for a given port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
+
+static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ MLXSW_REG_ZERO(qpts, payload);
+
+ mlxsw_reg_qpts_local_port_set(payload, local_port);
+ mlxsw_reg_qpts_trust_state_set(payload, ts);
+}
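/* Illustrative sketch, not part of this patch: switching a port from
 * the default PCP trust state to DSCP trust, following the same
 * pack-and-write pattern used elsewhere in the driver. The
 * mlxsw_example_* name is hypothetical.
 */
static inline int mlxsw_example_port_trust_dscp(struct mlxsw_core *core,
						u8 local_port)
{
	char qpts_pl[MLXSW_REG_QPTS_LEN];

	mlxsw_reg_qpts_pack(qpts_pl, local_port,
			    MLXSW_REG_QPTS_TRUST_STATE_DSCP);
	return mlxsw_reg_write(core, MLXSW_REG(qpts), qpts_pl);
}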
+
/* QPCR - QoS Policer Configuration Register
* -----------------------------------------
* The QPCR register is used to create policers - that limit
@@ -2753,6 +3332,219 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
+/* QRWE - QoS ReWrite Enable
+ * -------------------------
+ * This register configures the rewrite enable per receive port.
+ */
+#define MLXSW_REG_QRWE_ID 0x400F
+#define MLXSW_REG_QRWE_LEN 0x08
+
+MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
+
+/* reg_qrwe_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported. No support for router port.
+ */
+MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+
+/* reg_qrwe_dscp
+ * Whether to enable DSCP rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
+
+/* reg_qrwe_pcp
+ * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
+
+static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+ bool rewrite_pcp, bool rewrite_dscp)
+{
+ MLXSW_REG_ZERO(qrwe, payload);
+ mlxsw_reg_qrwe_local_port_set(payload, local_port);
+ mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp);
+ mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp);
+}
+
+/* QPDSM - QoS Priority to DSCP Mapping
+ * ------------------------------------
+ * QoS Priority to DSCP Mapping Register
+ */
+#define MLXSW_REG_QPDSM_ID 0x4011
+#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16
+#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
+
+/* reg_qpdsm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+
+/* reg_qpdsm_prio_entry_color0_e
+ * Enable update of the entry for color 0 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 31, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color0_dscp
+ * DSCP field in the outer label of the packet for color 0 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 24, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_e
+ * Enable update of the entry for color 1 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 23, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_dscp
+ * DSCP field in the outer label of the packet for color 1 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 16, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_e
+ * Enable update of the entry for color 2 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_dscp
+ * DSCP field in the outer label of the packet for color 2 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdsm, payload);
+ mlxsw_reg_qpdsm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
+{
+ mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
+}
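/* Illustrative sketch, not part of this patch: programming an
 * identity-style egress map where switch priority p rewrites to
 * DSCP p << 3 for all colors, across the 8 IEEE priorities. The
 * mlxsw_example_* name is hypothetical.
 */
static inline int mlxsw_example_qpdsm_default(struct mlxsw_core *core,
					      u8 local_port)
{
	char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
	unsigned short prio;

	mlxsw_reg_qpdsm_pack(qpdsm_pl, local_port);
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, prio, prio << 3);
	return mlxsw_reg_write(core, MLXSW_REG(qpdsm), qpdsm_pl);
}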
+
+/* QPDPM - QoS Port DSCP to Priority Mapping Register
+ * --------------------------------------------------
+ * This register controls the mapping from DSCP field to
+ * Switch Priority for IP packets.
+ */
+#define MLXSW_REG_QPDPM_ID 0x4013
+#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
+
+/* reg_qpdpm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+
+/* reg_qpdpm_dscp_e
+ * Enable update of the specific entry. When cleared, the switch_prio and color
+ * fields are ignored and the previous switch_prio and color values are
+ * preserved.
+ * Access: WO
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdpm_dscp_prio
+ * The new Switch Priority value for the relevant DSCP value.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
+ MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdpm, payload);
+ mlxsw_reg_qpdpm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio)
+{
+ mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1);
+ mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio);
+}
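/* Illustrative sketch, not part of this patch: mapping a single DSCP
 * value to a switch priority. Entries whose "e" bit stays clear keep
 * their previous mapping. The mlxsw_example_* name is hypothetical.
 */
static inline int mlxsw_example_qpdpm_set(struct mlxsw_core *core,
					  u8 local_port, u8 dscp, u8 prio)
{
	char qpdpm_pl[MLXSW_REG_QPDPM_LEN];

	mlxsw_reg_qpdpm_pack(qpdpm_pl, local_port);
	mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, dscp, prio);
	return mlxsw_reg_write(core, MLXSW_REG(qpdpm), qpdpm_pl);
}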
+
+/* QTCTM - QoS Switch Traffic Class Table is Multicast-Aware Register
+ * ------------------------------------------------------------------
+ * This register configures if the Switch Priority to Traffic Class mapping is
+ * based on Multicast packet indication. If so, multicast packets will get
+ * a Traffic Class that is (cap_max_tclass_data/2) above the value
+ * configured by QTCT.
+ * By default, Switch Priority to Traffic Class mapping is not based on
+ * Multicast packet indication.
+ */
+#define MLXSW_REG_QTCTM_ID 0x401A
+#define MLXSW_REG_QTCTM_LEN 0x08
+
+MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
+
+/* reg_qtctm_local_port
+ * Local port number.
+ * No support for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+
+/* reg_qtctm_mc
+ * Multicast Mode
+ * Whether Switch Priority to Traffic Class mapping is based on Multicast packet
+ * indication (default is 0, not based on Multicast packet indication).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
+
+static inline void
+mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+{
+ MLXSW_REG_ZERO(qtctm, payload);
+ mlxsw_reg_qtctm_local_port_set(payload, local_port);
+ mlxsw_reg_qtctm_mc_set(payload, mc);
+}
+
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -3350,6 +4142,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
@@ -3508,6 +4301,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2819 Counter Group */
+
+/* reg_ppcnt_ether_stats_pkts64octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts65to127octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts128to255octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts256to511octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts512to1023octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1024to1518octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1519to2047octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts2048to4095octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts4096to8191octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts8192to10239octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4338,6 +5193,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
*/
MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+/* reg_ritr_if_vrrp_id_ipv6
+ * VRRP ID for IPv6
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8);
+
+/* reg_ritr_if_vrrp_id_ipv4
+ * VRRP ID for IPv4
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
+
/* VLAN Interface */
/* reg_ritr_vlan_if_vid
@@ -7871,6 +8740,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
+ MLXSW_REG(pgcr),
MLXSW_REG(ppbt),
MLXSW_REG(pacl),
MLXSW_REG(pagt),
@@ -7879,9 +8749,20 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(prcr),
MLXSW_REG(pefa),
MLXSW_REG(ptce2),
+ MLXSW_REG(perpt),
+ MLXSW_REG(perar),
+ MLXSW_REG(ptce3),
+ MLXSW_REG(percr),
+ MLXSW_REG(pererp),
+ MLXSW_REG(iedr),
+ MLXSW_REG(qpts),
MLXSW_REG(qpcr),
MLXSW_REG(qtct),
MLXSW_REG(qeec),
+ MLXSW_REG(qrwe),
+ MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdpm),
+ MLXSW_REG(qtctm),
MLXSW_REG(pmlp),
MLXSW_REG(pmtu),
MLXSW_REG(ptys),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index fd9299ccec72..79a31de7c825 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/resources.h
- * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_RESOURCES_H
#define _MLXSW_RESOURCES_H
@@ -42,6 +11,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
+ MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
MLXSW_RES_ID_CQE_V0,
MLXSW_RES_ID_CQE_V1,
@@ -63,6 +34,13 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
+ MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -83,6 +61,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
+ [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
[MLXSW_RES_ID_CQE_V0] = 0x2210,
[MLXSW_RES_ID_CQE_V1] = 0x2211,
@@ -104,6 +84,13 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
+ [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 968b88af2ef5..6070d1591d1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -74,17 +41,27 @@
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1620
-#define MLXSW_FWREV_SUBMINOR 192
-#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+
+#define MLXSW_SP1_FWREV_MAJOR 13
+#define MLXSW_SP1_FWREV_MINOR 1702
+#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
-#define MLXSW_SP_FW_FILENAME \
- "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
- "." __stringify(MLXSW_FWREV_MINOR) \
- "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
+static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
+ .major = MLXSW_SP1_FWREV_MAJOR,
+ .minor = MLXSW_SP1_FWREV_MINOR,
+ .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+ .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
+};
+
+#define MLXSW_SP1_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP1_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
-static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
@@ -338,35 +315,50 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
+ const char *fw_filename = mlxsw_sp->fw_filename;
const struct firmware *firmware;
int err;
+ /* Don't check if driver does not require it */
+ if (!req_rev || !fw_filename)
+ return 0;
+
/* Validate driver & FW are compatible */
- if (rev->major != MLXSW_FWREV_MAJOR) {
+ if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, MLXSW_FWREV_MAJOR);
+ rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
+ if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
- err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ err = request_firmware_direct(&firmware, fw_filename,
mlxsw_sp->bus_info->dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
return err;
}
err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
release_firmware(firmware);
- return err;
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
}
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
@@ -1441,6 +1433,11 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
return 0;
case TC_CLSFLOWER_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
+ case TC_CLSFLOWER_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
+ case TC_CLSFLOWER_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -1503,7 +1500,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tcf_block *block, bool ingress)
+ struct tcf_block *block, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
@@ -1518,7 +1516,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
block_cb = __tcf_block_cb_register(block,
mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block);
+ mlxsw_sp, acl_block, extack);
if (IS_ERR(block_cb)) {
err = PTR_ERR(block_cb);
goto err_cb_register;
@@ -1541,7 +1539,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
err_block_bind:
if (!tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
err_cb_register:
mlxsw_sp_acl_block_destroy(acl_block);
}
@@ -1571,7 +1569,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (!err && !tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
mlxsw_sp_acl_block_destroy(acl_block);
}
}
@@ -1596,11 +1594,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
switch (f->command) {
case TC_BLOCK_BIND:
err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, f->extack);
if (err)
return err;
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
- f->block, ingress);
+ f->block, ingress,
+ f->extack);
if (err) {
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return err;
@@ -1712,7 +1711,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ sizeof(drvinfo->driver));
strlcpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1873,6 +1873,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+ {
+ .str = "ether_pkts64octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+ },
+ {
+ .str = "ether_pkts65to127octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+ },
+ {
+ .str = "ether_pkts128to255octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+ },
+ {
+ .str = "ether_pkts256to511octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+ },
+ {
+ .str = "ether_pkts512to1023octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+ },
+ {
+ .str = "ether_pkts1024to1518octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+ },
+ {
+ .str = "ether_pkts1519to2047octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+ },
+ {
+ .str = "ether_pkts2048to4095octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+ },
+ {
+ .str = "ether_pkts4096to8191octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+ },
+ {
+ .str = "ether_pkts8192to10239octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1925,9 +1971,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
- (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
- MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
- IEEE_8021QAZ_MAX_TCS)
+ MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
+ IEEE_8021QAZ_MAX_TCS) + \
+ (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
+ TC_MAX_QUEUE))
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
@@ -1964,11 +2012,16 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < TC_MAX_QUEUE; i++)
mlxsw_sp_port_get_tc_strings(&p, i);
break;
@@ -2003,10 +2056,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
switch (grp) {
- case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+ case MLXSW_REG_PPCNT_IEEE_8023_CNT:
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2819_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2056,6 +2113,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2819 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -2064,7 +2126,7 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
}
/* Per-TC Counters */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
data, data_index);
data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
@@ -2711,9 +2773,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
false, 0);
if (err)
return err;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ false, 0);
+ if (err)
+ return err;
}
- /* Make sure the max shaper is disabled in all hierarcies that
+ /* Make sure the max shaper is disabled in all hierarchies that
* support it.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
@@ -2748,6 +2817,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtctm_pl[MLXSW_REG_QTCTM_LEN];
+
+ mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@@ -2876,6 +2955,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_ets_init;
}
+ err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_tc_mc_mode;
+ }
+
/* ETS and buffers must be initialized before DCB. */
err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
if (err) {
@@ -2932,6 +3018,8 @@ err_port_qdiscs_init:
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
@@ -2966,6 +3054,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->sample);
@@ -3371,6 +3460,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -3623,10 +3714,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->bus_info = mlxsw_bus_info;
err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+ if (err)
return err;
- }
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3757,6 +3846,36 @@ err_fids_init:
return err;
}
+static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
+static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
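The two thin wrappers select per-ASIC operations (KVDL, flex actions, flex keys, multicast-routing TCAM, ACL TCAM) and then fall into the common mlxsw_sp_init(), which only touches the generic fields. A self-contained sketch of this dispatch pattern; the names below are illustrative, not the driver's:

	#include <stdio.h>

	struct chip_ops {
		const char *name;
		int (*setup)(void);
	};

	static int sp1_setup(void) { printf("SP1 setup\n"); return 0; }
	static int sp2_setup(void) { printf("SP2 setup\n"); return 0; }

	static const struct chip_ops sp1_ops = { "sp1", sp1_setup };
	static const struct chip_ops sp2_ops = { "sp2", sp2_setup };

	struct device { const struct chip_ops *ops; };

	/* Common path; per-ASIC behavior lives behind the ops pointer. */
	static int common_init(struct device *dev)
	{
		return dev->ops->setup();
	}

	int main(void)
	{
		struct device d1 = { &sp1_ops }, d2 = { &sp2_ops };

		return common_init(&d1) || common_init(&d2);
	}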
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3777,7 +3896,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
-static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
+static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
@@ -3803,6 +3922,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
},
};
+static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
+ .used_max_mid = 1,
+ .max_mid = MLXSW_SP_MID_MAX,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 3,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 3,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -3839,7 +3980,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
DEVLINK_RESOURCE_UNIT_ENTRY);
}
-static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct devlink_resource_size_params hash_single_size_params;
@@ -3850,7 +3991,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
const struct mlxsw_config_profile *profile;
int err;
- profile = &mlxsw_sp_config_profile;
+ profile = &mlxsw_sp1_config_profile;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
@@ -3876,7 +4017,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
- err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+ err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
if (err)
return err;
@@ -3905,6 +4046,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+}
+
+static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return 0;
+}
+
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
@@ -3960,10 +4111,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = mlxsw_sp_driver_name,
+static struct mlxsw_driver mlxsw_sp1_driver = {
+ .kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
+ .init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
@@ -3979,10 +4130,35 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
- .resources_register = mlxsw_sp_resources_register,
+ .resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .profile = &mlxsw_sp1_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp2_driver = {
+ .kind = mlxsw_sp2_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
};
@@ -4397,7 +4573,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_ovs_master(upper_dev)) {
+ !netif_is_ovs_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
@@ -4423,6 +4600,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
@@ -4461,6 +4643,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
}
break;
}
@@ -4545,8 +4730,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
+ if (!netif_is_bridge_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4558,6 +4744,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4571,6 +4762,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4620,6 +4814,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
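The new handlers follow the netdev notifier's two-phase contract: NETDEV_PRECHANGEUPPER may veto a link before it exists (hence the error returns with extack messages), while NETDEV_CHANGEUPPER only reacts after the topology already changed, here by dropping RIF state when a macvlan is unlinked. A user-space sketch of that contract:

	#include <stdbool.h>
	#include <stdio.h>

	enum nd_event { PRECHANGEUPPER, CHANGEUPPER };

	/* Returns nonzero to veto; mirrors the two-phase flow above. */
	static int upper_event(enum nd_event ev, bool linking, bool upper_supported)
	{
		switch (ev) {
		case PRECHANGEUPPER:	/* validate before the link exists */
			if (linking && !upper_supported)
				return -1;	/* veto, nothing to undo */
			break;
		case CHANGEUPPER:	/* react after the link changed */
			if (!linking)
				printf("unlinked: tear down offloaded state\n");
			break;
		}
		return 0;
	}

	int main(void)
	{
		if (upper_event(PRECHANGEUPPER, true, false))
			return 0;	/* vetoed: the link never happens */
		upper_event(CHANGEUPPER, false, true);
		return 0;
	}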
+static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+
+ if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+
+ return -EOPNOTSUPP;
+}
+
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info = ptr;
@@ -4661,6 +4913,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (netif_is_macvlan(dev))
+ err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
return notifier_from_errno(err);
}
@@ -4681,14 +4937,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inet6addr_event,
};
-static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
+static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
};
-static struct pci_driver mlxsw_sp_pci_driver = {
- .name = mlxsw_sp_driver_name,
- .id_table = mlxsw_sp_pci_id_table,
+static struct pci_driver mlxsw_sp1_pci_driver = {
+ .name = mlxsw_sp1_driver_name,
+ .id_table = mlxsw_sp1_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp2_pci_driver = {
+ .name = mlxsw_sp2_driver_name,
+ .id_table = mlxsw_sp2_pci_id_table,
};
static int __init mlxsw_sp_module_init(void)
@@ -4700,19 +4966,31 @@ static int __init mlxsw_sp_module_init(void)
register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
- err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
+ if (err)
+ goto err_sp1_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
+ if (err)
+ goto err_sp2_core_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
- goto err_core_driver_register;
+ goto err_sp1_pci_driver_register;
- err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
+ err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
if (err)
- goto err_pci_driver_register;
+ goto err_sp2_pci_driver_register;
return 0;
-err_pci_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
-err_core_driver_register:
+err_sp2_pci_driver_register:
+	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+err_sp2_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
+err_sp1_core_driver_register:
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4722,8 +5000,10 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
- mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
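The registration sequence keeps the driver's usual goto-unwind shape: each error label undoes the step registered just before the failing one, so teardown runs in strict reverse order of setup (note the sp2 PCI label must unregister the sp1 PCI driver, not the sp2 one that just failed). A compact, runnable sketch of the same shape:

	#include <stdio.h>

	static int reg(const char *what, int fail)
	{
		if (fail)
			return -1;
		printf("register %s\n", what);
		return 0;
	}

	static void unreg(const char *what) { printf("unregister %s\n", what); }

	static int module_init_sketch(void)
	{
		int err;

		err = reg("sp1 core", 0);
		if (err)
			goto err_sp1_core;
		err = reg("sp2 core", 0);
		if (err)
			goto err_sp2_core;
		err = reg("sp1 pci", 1);	/* simulate a failure */
		if (err)
			goto err_sp1_pci;
		return 0;

	err_sp1_pci:
		unreg("sp2 core");	/* undo the step before the failure */
	err_sp2_core:
		unreg("sp1 core");
	err_sp1_core:
		return err;
	}

	int main(void) { return module_init_sketch() ? 1 : 0; }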
@@ -4736,5 +5016,6 @@ module_exit(mlxsw_sp_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
-MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
-MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4a519d8edec8..3ae930196741 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H
@@ -54,6 +21,7 @@
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
+#include "reg.h"
#define MLXSW_SP_FID_8021D_MAX 1024
@@ -145,6 +113,9 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
+struct mlxsw_sp_kvdl_ops;
+struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -168,6 +139,13 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ const struct mlxsw_fw_rev *req_rev;
+ const char *fw_filename;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ const struct mlxsw_afa_ops *afa_ops;
+ const struct mlxsw_afk_ops *afk_ops;
+ const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+ const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
};
static inline struct mlxsw_sp_upper *
@@ -233,6 +211,7 @@ struct mlxsw_sp_port {
struct ieee_ets *ets;
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
+ enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
struct {
u8 module;
@@ -407,6 +386,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
@@ -435,15 +416,62 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
+enum mlxsw_sp_kvdl_entry_type {
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+};
+
+static inline unsigned int
+mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
+{
+ switch (type) {
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ default:
+ return 1;
+ }
+}
+
+struct mlxsw_sp_kvdl_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+ void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+ int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+ int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index);
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size);
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+
+/* spectrum1_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+
+/* spectrum2_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -452,44 +480,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
-};
-
-struct mlxsw_sp_acl_profile_ops {
- size_t ruleset_priv_size;
- int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv);
- void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
- int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- u16 (*ruleset_group_id)(void *ruleset_priv);
- size_t rule_priv_size;
- int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
- struct mlxsw_sp_acl_rule_info *rulei);
- void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
- int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
- bool *activity);
-};
-
-struct mlxsw_sp_acl_ops {
- size_t priv_size;
- int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
- void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
- const struct mlxsw_sp_acl_profile_ops *
- (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_acl_profile profile);
-};
-
struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
@@ -507,6 +505,7 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
+bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
@@ -514,7 +513,8 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- enum mlxsw_sp_acl_profile profile);
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
@@ -541,25 +541,30 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block,
- struct net_device *out_dev);
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct net_device *out_dev);
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u32 action, u16 vid, u16 proto, u8 prio);
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei);
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u16 fid);
+ u16 fid, struct netlink_ext_ack *extack);
struct mlxsw_sp_acl_rule;
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
@@ -582,7 +587,52 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
/* spectrum_acl_tcam.c */
-extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_acl_tcam;
+struct mlxsw_sp_acl_tcam_region;
+
+struct mlxsw_sp_acl_tcam_ops {
+ enum mlxsw_reg_ptar_key_type key_type;
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ size_t region_priv_size;
+ int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *region);
+ void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
+ int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region);
+ size_t chunk_priv_size;
+ void (*chunk_init)(void *region_priv, void *chunk_priv,
+ unsigned int priority);
+ void (*chunk_fini)(void *chunk_priv);
+ size_t entry_priv_size;
+ int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv);
+ int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity);
+};
+
+/* spectrum1_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
+
+/* spectrum2_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;
+
+/* spectrum_acl_flex_actions.c */
+extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
+extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
+
+/* spectrum_acl_flex_keys.c */
+extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -594,6 +644,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f);
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -631,4 +687,40 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_mr.c */
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key;
+
+struct mlxsw_sp_mr_tcam_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(void *priv);
+ size_t route_priv_size;
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block);
+};
+
+/* spectrum1_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
+
+/* spectrum2_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
new file mode 100644
index 000000000000..2a9eac90002e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp1_acl_tcam_region {
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ } catchall;
+};
+
+struct mlxsw_sp1_acl_tcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp1_acl_tcam_entry {
+ struct mlxsw_sp_acl_ctcam_entry centry;
+};
+
+static int
+mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp1_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+}
+
+static int
+mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
+ &region->catchall.cchunk,
+ MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ if (IS_ERR(rulei)) {
+ err = PTR_ERR(rulei);
+ goto err_rulei_create;
+ }
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry,
+ rulei, false);
+ if (err)
+ goto err_entry_add;
+ region->catchall.rulei = rulei;
+ return 0;
+
+err_entry_add:
+err_rulei_commit:
+err_rulei_act_continue:
+ mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry);
+ mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ int err;
+
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+ _region,
+ &mlxsw_sp1_acl_ctcam_region_ops);
+ if (err)
+ return err;
+ err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
+ if (err)
+ goto err_catchall_add;
+ region->region = _region;
+ return 0;
+
+err_catchall_add:
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ priority);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry,
+ rulei, false);
+}
+
+static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ _region->tcam_region_info, offset, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+ unsigned int offset;
+
+ offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry);
+ return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
+ region->region,
+ offset, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX,
+ .priv_size = 0,
+ .init = mlxsw_sp1_acl_tcam_init,
+ .fini = mlxsw_sp1_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region),
+ .region_init = mlxsw_sp1_acl_tcam_region_init,
+ .region_fini = mlxsw_sp1_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp1_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp1_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
+ .entry_add = mlxsw_sp1_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
new file mode 100644
index 000000000000..09ee0a807747
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP1_KVDL_SINGLE_BASE 0
+#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP1_KVDL_SINGLE_END \
+ (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
+
+#define MLXSW_SP1_KVDL_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
+#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP1_KVDL_CHUNKS_END \
+ (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
+
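The three partitions tile the linear KVD back to back, each with a fixed allocation granularity. A worked sketch of the default layout; the total linear size is an assumption (98304 entries in this era of the driver), not stated in this hunk:

	#include <stdio.h>

	#define KVD_LINEAR_SIZE		98304	/* assumption */
	#define SINGLE_BASE		0
	#define SINGLE_SIZE		16384
	#define CHUNKS_BASE		(SINGLE_BASE + SINGLE_SIZE)
	#define CHUNKS_SIZE		49152
	#define LARGE_CHUNKS_BASE	(CHUNKS_BASE + CHUNKS_SIZE)
	#define LARGE_CHUNKS_SIZE	(KVD_LINEAR_SIZE - LARGE_CHUNKS_BASE)

	int main(void)
	{
		printf("single:       %6d..%6d, 1-entry allocations\n",
		       SINGLE_BASE, SINGLE_BASE + SINGLE_SIZE - 1);
		printf("chunks:       %6d..%6d, 32-entry allocations\n",
		       CHUNKS_BASE, CHUNKS_BASE + CHUNKS_SIZE - 1);
		printf("large chunks: %6d..%6d, 512-entry allocations\n",
		       LARGE_CHUNKS_BASE,
		       LARGE_CHUNKS_BASE + LARGE_CHUNKS_SIZE - 1);
		return 0;
	}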
+struct mlxsw_sp1_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+ enum mlxsw_sp_resource_id resource_id;
+};
+
+enum mlxsw_sp1_kvdl_part_id {
+ MLXSW_SP1_KVDL_PART_ID_SINGLE,
+ MLXSW_SP1_KVDL_PART_ID_CHUNKS,
+ MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP1_KVDL_PART_INFO(id) \
+[MLXSW_SP1_KVDL_PART_ID_##id] = { \
+ .start_index = MLXSW_SP1_KVDL_##id##_BASE, \
+ .end_index = MLXSW_SP1_KVDL_##id##_END, \
+ .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \
+ .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
+}
+
+static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
+ MLXSW_SP1_KVDL_PART_INFO(SINGLE),
+ MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
+ MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
+
+struct mlxsw_sp1_kvdl_part {
+ struct mlxsw_sp1_kvdl_part_info info;
+ unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp1_kvdl {
+ struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
+};
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (alloc_size <= part->info.alloc_size &&
+ (!min_part ||
+ part->info.alloc_size <= min_part->info.alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp1_kvdl_part *part;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (kvdl_index >= part->info.start_index &&
+ kvdl_index <= part->info.end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
+
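These two helpers convert between a bit position in a partition's usage bitmap and an absolute KVDL index. With the CHUNKS defaults above (base 16384, 32-entry allocations), entry 3 maps to KVDL index 16384 + 3 * 32 = 16480 and back. A quick round-trip check:

	#include <assert.h>

	int main(void)
	{
		unsigned int start = 16384, alloc = 32, entry = 3;
		unsigned int kvdl = start + entry * alloc;	/* -> 16480 */

		assert(kvdl == 16480);
		assert((kvdl - start) / alloc == entry);	/* round-trips */
		return 0;
	}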
+static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
+ u32 kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
+
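part_alloc() and part_free() treat each partition as a bitmap with one bit per alloc_size-sized slot: allocation is first-fit on the lowest clear bit, freeing is a single bit clear. A user-space analog with a fixed 64 slots for brevity:

	#include <assert.h>
	#include <stdint.h>

	#define NR_ENTRIES 64

	static uint64_t usage;	/* bit i set = slot i taken */

	static int slot_alloc(unsigned int *p_entry)
	{
		unsigned int i;

		for (i = 0; i < NR_ENTRIES; i++) {
			if (!(usage & (UINT64_C(1) << i))) {
				usage |= UINT64_C(1) << i;
				*p_entry = i;
				return 0;
			}
		}
		return -1;	/* -ENOBUFS in the driver */
	}

	static void slot_free(unsigned int entry)
	{
		usage &= ~(UINT64_C(1) << entry);
	}

	int main(void)
	{
		unsigned int a, b;

		assert(!slot_alloc(&a) && a == 0);
		assert(!slot_alloc(&b) && b == 1);
		slot_free(a);
		assert(!slot_alloc(&a) && a == 0);	/* freed slot is reused */
		return 0;
	}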
+static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+	/* Find the partition with the smallest allocation size that still
+	 * satisfies the requested size.
+	 */
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
+}
+
+static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp1_kvdl_part_free(part, entry_index);
+}
+
+static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info.alloc_size;
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
+ struct mlxsw_sp1_kvdl_part *part_prev,
+ unsigned int size)
+{
+ if (!part_prev) {
+ part->info.end_index = size - 1;
+ } else {
+ part->info.start_index = part_prev->info.end_index + 1;
+ part->info.end_index = part->info.start_index + size - 1;
+ }
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp1_kvdl_part_info *info,
+ struct mlxsw_sp1_kvdl_part *part_prev)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl_part *part;
+ bool need_update = true;
+ unsigned int nr_entries;
+ size_t usage_size;
+ u64 resource_size;
+ int err;
+
+ err = devlink_resource_size_get(devlink, info->resource_id,
+ &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
+ usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&part->info, info, sizeof(part->info));
+
+ if (need_update)
+ mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
+ return part;
+}
+
+static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_kvdl *kvdl)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info;
+ struct mlxsw_sp1_kvdl_part *part_prev = NULL;
+ int err, i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp1_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
+ part_prev);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ part_prev = kvdl->parts[i];
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (info->end_index -
+ info->start_index + 1) /
+ info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += info->alloc_size;
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
+
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ int err;
+
+ err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
+ if (err)
+ return err;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
+ mlxsw_sp1_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_kvdl),
+ .init = mlxsw_sp1_kvdl_init,
+ .fini = mlxsw_sp1_kvdl_fini,
+ .alloc = mlxsw_sp1_kvdl_alloc,
+ .free = mlxsw_sp1_kvdl_free,
+ .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
+};
+
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ static struct devlink_resource_size_params size_params;
+ u32 kvdl_max_size;
+ int err;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
new file mode 100644
index 000000000000..c8c67536917b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp1_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp1_mr_tcam {
+ struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+};
+
+struct mlxsw_sp1_mr_tcam_route {
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+};
+
+static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static struct mlxsw_sp1_mr_tcam_region *
+mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+ int err;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
+ return 0;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ int err;
+
+ err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
+ key, prio);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+}
+
+#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp1_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp1_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
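parman keeps the TCAM region sorted by priority and calls back into the driver to grow the region (resize, in RESIZE_STEP increments) and to shift existing rules (move) when an insertion lands mid-array. A minimal sketch of the lsort idea; this is illustrative, not the parman API:

	#include <assert.h>
	#include <string.h>

	#define CAP 16

	static int prios[CAP];
	static int used;

	/* Insert keeping ascending priority; shifting the tail models the
	 * move() callback re-writing TCAM entries at their new offsets. */
	static int item_add(int prio)
	{
		int i;

		if (used == CAP)
			return -1;	/* real parman would call resize() here */
		for (i = 0; i < used && prios[i] <= prio; i++)
			;
		memmove(&prios[i + 1], &prios[i],
			(used - i) * sizeof(prios[0]));
		prios[i] = prio;
		used++;
		return i;		/* index where the rule now lives */
	}

	int main(void)
	{
		assert(item_add(10) == 0);
		assert(item_add(30) == 1);
		assert(item_add(20) == 1);	/* 30 shifted up to index 2 */
		assert(prios[2] == 30);
		return 0;
	}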
+static int
+mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
+}
+
+static void mlxsw_sp1_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
+ .init = mlxsw_sp1_mr_tcam_init,
+ .fini = mlxsw_sp1_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
+ .route_create = mlxsw_sp1_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp1_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
new file mode 100644
index 000000000000..8ca77f3e8f27
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_actions.h"
+
+struct mlxsw_sp2_acl_tcam {
+ struct mlxsw_sp_acl_atcam atcam;
+ u32 kvdl_index;
+ unsigned int kvdl_count;
+};
+
+struct mlxsw_sp2_acl_tcam_region {
+ struct mlxsw_sp_acl_atcam_region aregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp2_acl_tcam_chunk {
+ struct mlxsw_sp_acl_atcam_chunk achunk;
+};
+
+struct mlxsw_sp2_acl_tcam_entry {
+ struct mlxsw_sp_acl_atcam_entry aentry;
+ struct mlxsw_afa_block *act_block;
+};
+
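+/* Rules spilled from the A-TCAM into the C-TCAM still need an eRP, so
+ * that lookups in the region also consider the C-TCAM entries. These
+ * callbacks take and release that eRP reference.
+ */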
+static int
+mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ struct mlxsw_sp_acl_erp *erp;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+
+ return 0;
+}
+
+static void
+mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp2_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *_tcam)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+ struct mlxsw_afa_block *afa_block;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ char pgcr_pl[MLXSW_REG_PGCR_LEN];
+ char *enc_actions;
+ int i;
+ int err;
+
+ tcam->kvdl_count = _tcam->max_regions;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, &tcam->kvdl_index);
+ if (err)
+ return err;
+
+ /* Create a flex action block and set the default action (continue),
+ * but don't commit it. We only need the current action set encoding,
+ * which is then written via the PEFA register to all indexes of all
+ * regions.
+ */
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (!afa_block) {
+ err = -ENOMEM;
+ goto err_afa_block;
+ }
+ err = mlxsw_afa_block_continue(afa_block);
+ if (WARN_ON(err))
+ goto err_afa_block_continue;
+ enc_actions = mlxsw_afa_block_cur_set(afa_block);
+
+ for (i = 0; i < tcam->kvdl_count; i++) {
+ mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
+ true, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ }
+ mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
+ if (err)
+ goto err_pgcr_write;
+
+ err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
+ if (err)
+ goto err_atcam_init;
+
+ mlxsw_afa_block_destroy(afa_block);
+ return 0;
+
+err_atcam_init:
+err_pgcr_write:
+err_pefa_write:
+err_afa_block_continue:
+ mlxsw_afa_block_destroy(afa_block);
+err_afa_block:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+
+ mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
+
+ region->region = _region;
+
+ return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
+ &region->aregion, _region,
+ &mlxsw_sp2_acl_ctcam_region_ops);
+}
+
+static void
+mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp_acl_atcam_region_fini(&region->aregion);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
+ priority);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
+}
+
+static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, &region->aregion,
+ &chunk->achunk, &entry->aentry,
+ rulei);
+}
+
+static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
+ &entry->aentry);
+}
+
+static int
+mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_afa_block_activity_get(entry->act_block, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
+ .priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
+ .init = mlxsw_sp2_acl_tcam_init,
+ .fini = mlxsw_sp2_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
+ .region_init = mlxsw_sp2_acl_tcam_region_init,
+ .region_fini = mlxsw_sp2_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp2_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
+ .entry_add = mlxsw_sp2_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
new file mode 100644
index 000000000000..68c8b148bef2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "resources.h"
+
+struct mlxsw_sp2_kvdl_part_info {
+ u8 res_type;
+ /* For each defined partition we need to know how many
+ * usage bits are needed and how many indexes are
+ * represented by a single bit. This could be obtained by
+ * querying the appropriate FW resources, so keep the
+ * resource ids for this purpose in the partition definition.
+ */
+ enum mlxsw_res_id usage_bit_count_res_id;
+ enum mlxsw_res_id index_range_res_id;
+};
+
+#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \
+ _usage_bit_count_res_id, _index_range_res_id) \
+[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \
+ .res_type = _res_type, \
+ .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \
+ .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \
+}
+
+static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
+ MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE),
+ MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS,
+ MAX_KVD_ACTION_SETS),
+ MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+};
+
+#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
+
+struct mlxsw_sp2_kvdl_part {
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ unsigned int usage_bit_count;
+ unsigned int indexes_per_usage_bit;
+ unsigned int last_allocated_bit;
+ unsigned long usage[0]; /* Usage bits */
+};
+
+struct mlxsw_sp2_kvdl {
+ struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN];
+};
+
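+/* Next-fit search for a run of bit_count free usage bits: start just past
+ * the last allocation, wrap around to the beginning at most once, and
+ * return -ENOBUFS only after the whole bitmap has been scanned.
+ */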
+static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int bit_count,
+ unsigned int *p_bit)
+{
+ unsigned int start_bit;
+ unsigned int bit;
+ unsigned int i;
+ bool wrap = false;
+
+ start_bit = part->last_allocated_bit + 1;
+ if (start_bit == part->usage_bit_count)
+ start_bit = 0;
+ bit = start_bit;
+again:
+ bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
+ if (!wrap && bit + bit_count >= part->usage_bit_count) {
+ wrap = true;
+ bit = 0;
+ goto again;
+ }
+ if (wrap && bit + bit_count >= start_bit)
+ return -ENOBUFS;
+ for (i = 0; i < bit_count; i++) {
+ if (test_bit(bit + i, part->usage)) {
+ bit += bit_count;
+ goto again;
+ }
+ }
+ *p_bit = bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size,
+ u32 *p_kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
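+ /* A single usage bit may represent multiple KVDL indexes, so round
+ * the requested size up to a whole number of usage bits.
+ */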
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit);
+ if (err)
+ return err;
+ for (i = 0; i < bit_count; i++)
+ __set_bit(bit + i, part->usage);
+ /* Remember where this allocation ended so the next search starts
+ * just past it (next-fit).
+ */
+ part->last_allocated_bit = bit + bit_count - 1;
+ *p_kvdl_index = bit * part->indexes_per_usage_bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type,
+ u16 size, u32 kvdl_index)
+{
+ char *iedr_pl;
+ int err;
+
+ iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL);
+ if (!iedr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_iedr_pack(iedr_pl);
+ mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
+ kfree(iedr_pl);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size, u32 kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ /* We need to ask the FW to delete the previously used KVD linear index */
+ err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type,
+ size, kvdl_index);
+ if (err)
+ return;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ bit = kvdl_index / part->indexes_per_usage_bit;
+ for (i = 0; i < bit_count; i++)
+ __clear_bit(bit + i, part->usage);
+}
+
+static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index);
+}
+
+static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ int entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index);
+}
+
+static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
+{
+ *p_alloc_count = entry_count;
+ return 0;
+}
+
+static struct mlxsw_sp2_kvdl_part *
+mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp2_kvdl_part_info *info)
+{
+ unsigned int indexes_per_usage_bit;
+ struct mlxsw_sp2_kvdl_part *part;
+ unsigned int index_range;
+ unsigned int usage_bit_count;
+ size_t usage_size;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core,
+ info->usage_bit_count_res_id) ||
+ !mlxsw_core_res_valid(mlxsw_sp->core,
+ info->index_range_res_id))
+ return ERR_PTR(-EIO);
+ usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
+ info->usage_bit_count_res_id);
+ index_range = mlxsw_core_res_get(mlxsw_sp->core,
+ info->index_range_res_id);
+
+ /* For some partitions, one usage bit represents a group of indexes.
+ * That's why we compute the number of indexes per usage bit here,
+ * according to queried resources.
+ */
+ indexes_per_usage_bit = index_range / usage_bit_count;
+
+ usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+ part->info = info;
+ part->usage_bit_count = usage_bit_count;
+ part->indexes_per_usage_bit = indexes_per_usage_bit;
+ part->last_allocated_bit = usage_bit_count - 1;
+ return part;
+}
+
+static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl *kvdl)
+{
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp2_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl);
+}
+
+static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ mlxsw_sp2_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_kvdl),
+ .init = mlxsw_sp2_kvdl_init,
+ .fini = mlxsw_sp2_kvdl_fini,
+ .alloc = mlxsw_sp2_kvdl_alloc,
+ .free = mlxsw_sp2_kvdl_free,
+ .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
new file mode 100644
index 000000000000..4dd62478162e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "core_acl_flex_actions.h"
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
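+/* The callbacks below are no-op stubs: they succeed without programming
+ * any hardware, as multicast route offload for Spectrum-2 is not
+ * implemented in this file yet.
+ */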
+static int
+mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+}
+
+static int
+mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ return 0;
+}
+
+static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ return 0;
+}
+
+static void mlxsw_sp2_mr_tcam_fini(void *priv)
+{
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .init = mlxsw_sp2_mr_tcam_init,
+ .fini = mlxsw_sp2_mr_tcam_fini,
+ .route_create = mlxsw_sp2_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp2_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 79b1fa27a9a4..c4f9238591e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -48,13 +17,12 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#include "spectrum_acl_flex_keys.h"
+#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_sp_fid *dummy_fid;
- const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
struct {
@@ -62,8 +30,7 @@ struct mlxsw_sp_acl {
unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
} rule_activity_update;
- unsigned long priv[0];
- /* priv has to be always the last item */
+ struct mlxsw_sp_acl_tcam tcam;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
@@ -160,6 +127,17 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
return block->disable_count;
}
+bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ if (!binding->ingress)
+ return true;
+ }
+ return false;
+}
+
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
@@ -319,7 +297,8 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- const struct mlxsw_sp_acl_profile_ops *ops)
+ const struct mlxsw_sp_acl_profile_ops *ops,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -339,7 +318,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_init;
- err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+ err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
+ tmplt_elusage);
if (err)
goto err_ops_ruleset_add;
@@ -409,7 +389,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
@@ -421,13 +401,14 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- enum mlxsw_sp_acl_profile profile)
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
@@ -436,7 +417,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
+ tmplt_elusage);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -487,7 +469,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority)
{
- rulei->priority = priority;
+ rulei->priority = priority >> 16;
}
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
@@ -536,18 +518,23 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct net_device *out_dev)
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u8 local_port;
bool in_port;
if (out_dev) {
- if (!mlxsw_sp_port_dev_check(out_dev))
+ if (!mlxsw_sp_port_dev_check(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
return -EINVAL;
+ }
mlxsw_sp_port = netdev_priv(out_dev);
- if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
+ if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
return -EINVAL;
+ }
local_port = mlxsw_sp_port->local_port;
in_port = false;
} else {
@@ -558,20 +545,22 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
in_port = true;
}
return mlxsw_afa_block_append_fwd(rulei->act_block,
- local_port, in_port);
+ local_port, in_port, extack);
}
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block,
- struct net_device *out_dev)
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
struct mlxsw_sp_port *in_port;
- if (!list_is_singular(&block->binding_list))
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
return -EOPNOTSUPP;
-
+ }
binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list);
in_port = binding->mlxsw_sp_port;
@@ -579,12 +568,14 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_mirror(rulei->act_block,
in_port->local_port,
out_dev,
- binding->ingress);
+ binding->ingress,
+ extack);
}
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u32 action, u16 vid, u16 proto, u8 prio)
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack)
{
u8 ethertype;
@@ -597,44 +588,50 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
ethertype = 1;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
proto);
return -EINVAL;
}
return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
- vid, prio, ethertype);
+ vid, prio, ethertype,
+ extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
return -EINVAL;
}
}
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- &rulei->counter_index);
+ &rulei->counter_index, extack);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u16 fid)
+ u16 fid, struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie,
+ struct netlink_ext_ack *extack)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl_rule *rule;
int err;
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
- rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
+ GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
goto err_alloc;
@@ -825,20 +822,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
- const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
+ size_t alloc_size;
int err;
- acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+ alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
+ acl = kzalloc(alloc_size, GFP_KERNEL);
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
- mlxsw_sp_afk_blocks,
- MLXSW_SP_AFK_BLOCKS_COUNT);
+ mlxsw_sp->afk_ops);
if (!acl->afk) {
err = -ENOMEM;
goto err_afk_create;
@@ -857,12 +854,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
- err = acl_ops->init(mlxsw_sp, acl->priv);
+ err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
- acl->ops = acl_ops;
-
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
mlxsw_sp_acl_rul_activity_update_work);
@@ -884,10 +879,9 @@ err_afk_create:
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
- acl_ops->fini(mlxsw_sp, acl->priv);
+ mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
new file mode 100644
index 000000000000..2dda028f94db
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
+
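+/* On 12KB regions, the large key ID is derived from key blocks 6..11,
+ * i.e. the MSB blocks of the encoded key.
+ */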
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+
+struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_lkey_id {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key;
+ refcount_t refcnt;
+ u32 id;
+};
+
+struct mlxsw_sp_acl_atcam_region_ops {
+ int (*init)(struct mlxsw_sp_acl_atcam_region *aregion);
+ void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
+ struct mlxsw_sp_acl_atcam_lkey_id *
+ (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
+};
+
+struct mlxsw_sp_acl_atcam_region_generic {
+ struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id;
+};
+
+struct mlxsw_sp_acl_atcam_region_12kb {
+ struct rhashtable lkey_ht;
+ unsigned int max_lkey_id;
+ unsigned long *used_lkey_id;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node),
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node),
+};
+
+static bool
+mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL);
+ if (!region_generic)
+ return -ENOMEM;
+
+ refcount_set(&region_generic->dummy_lkey_id.refcnt, 1);
+ aregion->priv = region_generic;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ kfree(aregion->priv);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = aregion->priv;
+ return &region_generic->dummy_lkey_id;
+}
+
+static void
+mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_generic_ops = {
+ .init = mlxsw_sp_acl_atcam_region_generic_init,
+ .fini = mlxsw_sp_acl_atcam_region_generic_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put,
+};
+
+static int
+mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
+ size_t alloc_size;
+ u64 max_lkey_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID))
+ return -EIO;
+
+ max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID);
+ region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL);
+ if (!region_12kb)
+ return -ENOMEM;
+
+ alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
+ region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
+ if (!region_12kb->used_lkey_id) {
+ err = -ENOMEM;
+ goto err_used_lkey_id_alloc;
+ }
+
+ err = rhashtable_init(&region_12kb->lkey_ht,
+ &mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ region_12kb->max_lkey_id = max_lkey_id;
+ aregion->priv = region_12kb;
+
+ return 0;
+
+err_rhashtable_init:
+ kfree(region_12kb->used_lkey_id);
+err_used_lkey_id_alloc:
+ kfree(region_12kb);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+
+ rhashtable_destroy(&region_12kb->lkey_ht);
+ kfree(region_12kb->used_lkey_id);
+ kfree(region_12kb);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ u32 id;
+ int err;
+
+ id = find_first_zero_bit(region_12kb->used_lkey_id,
+ region_12kb->max_lkey_id);
+ if (id < region_12kb->max_lkey_id)
+ __set_bit(id, region_12kb->used_lkey_id);
+ else
+ return ERR_PTR(-ENOBUFS);
+
+ lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL);
+ if (!lkey_id) {
+ err = -ENOMEM;
+ goto err_lkey_id_alloc;
+ }
+
+ lkey_id->id = id;
+ memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key));
+ refcount_set(&lkey_id->refcnt, 1);
+
+ err = rhashtable_insert_fast(&region_12kb->lkey_ht,
+ &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return lkey_id;
+
+err_rhashtable_insert:
+ kfree(lkey_id);
+err_lkey_id_alloc:
+ __clear_bit(id, region_12kb->used_lkey_id);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ u32 id = lkey_id->id;
+
+ rhashtable_remove_fast(&region_12kb->lkey_ht, &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ kfree(lkey_id);
+ __clear_bit(id, region_12kb->used_lkey_id);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 } };
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
+ NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ ht_key.erp_id = erp_id;
+ lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (lkey_id) {
+ refcount_inc(&lkey_id->refcnt);
+ return lkey_id;
+ }
+
+ return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key);
+}
+
+static void
+mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ if (refcount_dec_and_test(&lkey_id->refcnt))
+ mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id);
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_12kb_ops = {
+ .init = mlxsw_sp_acl_atcam_region_12kb_init,
+ .fini = mlxsw_sp_acl_atcam_region_12kb_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put,
+};
+
+static const struct mlxsw_sp_acl_atcam_region_ops *
+mlxsw_sp_acl_atcam_region_ops_arr[] = {
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] =
+ &mlxsw_sp_acl_atcam_region_12kb_ops,
+};
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char perar_pl[MLXSW_REG_PERAR_LEN];
+ /* For now, just assume that every region has 12 key blocks */
+ u16 hw_region = region_id * 3;
+ u64 max_regions;
+
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+ if (hw_region >= max_regions)
+ return -ENOBUFS;
+
+ mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
+}
+
+static void
+mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ enum mlxsw_sp_acl_atcam_region_type region_type;
+ unsigned int blocks_count;
+
+ /* We already know the blocks count cannot exceed the maximum
+ * blocks count.
+ */
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ if (blocks_count <= 2)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB;
+ else if (blocks_count <= 4)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB;
+ else if (blocks_count <= 8)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB;
+ else
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB;
+
+ aregion->type = region_type;
+ aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type];
+}
+
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ int err;
+
+ aregion->region = region;
+ aregion->atcam = atcam;
+ mlxsw_sp_acl_atcam_region_type_init(aregion);
+
+ err = rhashtable_init(&aregion->entries_ht,
+ &mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ return err;
+ err = aregion->ops->init(aregion);
+ if (err)
+ goto err_ops_init;
+ err = mlxsw_sp_acl_erp_region_init(aregion);
+ if (err)
+ goto err_erp_region_init;
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
+ region, ops);
+ if (err)
+ goto err_ctcam_region_init;
+
+ return 0;
+
+err_ctcam_region_init:
+ mlxsw_sp_acl_erp_region_fini(aregion);
+err_erp_region_init:
+ aregion->ops->fini(aregion);
+err_ops_init:
+ rhashtable_destroy(&aregion->entries_ht);
+ return err;
+}
+
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion);
+ mlxsw_sp_acl_erp_region_fini(aregion);
+ aregion->ops->fini(aregion);
+ rhashtable_destroy(&aregion->entries_ht);
+}
+
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority)
+{
+ mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk,
+ priority);
+}
+
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk)
+{
+ mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+
+ lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ if (IS_ERR(lkey_id))
+ return PTR_ERR(lkey_id);
+ aentry->lkey_id = lkey_id;
+
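+ /* A reference count above one means the large key ID is shared
+ * with other entries, which the device needs to be told about.
+ */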
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ priority, region->tcam_region_info,
+ aentry->ht_key.enc_key, erp_id,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ if (err)
+ goto err_ptce3_write;
+
+ return 0;
+
+err_ptce3_write:
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+
+ mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ region->tcam_region_info, aentry->ht_key.enc_key,
+ erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ lkey_id->id, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_erp *erp;
+ unsigned int blocks_count;
+ int err;
+
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+ aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+
+ /* We can't insert identical rules into the A-TCAM, so fail and
+ * let the rule spill into C-TCAM
+ */
+ err = rhashtable_lookup_insert_fast(&aregion->entries_ht,
+ &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
+ rulei);
+ if (err)
+ goto err_rule_insert;
+
+ return 0;
+
+err_rule_insert:
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_put(aregion, erp);
+ return err;
+}
+
+static void
+__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei);
+ if (!err)
+ return 0;
+
+ /* It is possible we failed to add the rule to the A-TCAM due to
+ * exceeded number of masks. Try to spill into C-TCAM.
+ */
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry,
+ rulei, true);
+ if (!err)
+ return 0;
+
+ return err;
+}
+
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry);
+ else
+ __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
+}
+
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam);
+}
+
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
new file mode 100644
index 000000000000..e3c6fe8b1d40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
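+/* The C-TCAM ("circuit TCAM") is an ordinary priority-ordered TCAM. Entry
+ * placement is delegated to the priority array manager (parman) via the
+ * resize and move callbacks below.
+ */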
+static int
+mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 new_size)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+ region->key_type, new_size, region->id,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 src_offset, u16 dst_offset, u16 size)
+{
+ char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+ mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+ region->tcam_region_info, src_offset,
+ region->tcam_region_info, dst_offset, size);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
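+/* In an ordinary Spectrum-1 region, ordering is fully determined by the
+ * entry position maintained by parman and the priority field is left at
+ * zero. When the C-TCAM backs an A-TCAM region (Spectrum-2), entries must
+ * carry their real priority, hence "fillup_priority".
+ */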
+static int
+mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ unsigned int blocks_count;
+ char *act_set;
+ u32 priority;
+ char *mask;
+ char *key;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
+ fillup_priority);
+ if (err)
+ return err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info,
+ centry->parman_item.index, priority);
+ key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+ mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
+ blocks_count - 1);
+
+ err = cregion->ops->entry_insert(cregion, centry, mask);
+ if (err)
+ return err;
+
+ /* Only the first action set belongs here, the rest is in KVD */
+ act_set = mlxsw_afa_block_first_set(rulei->act_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ cregion->ops->entry_remove(cregion, centry);
+}
+
+static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+ mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
+ from_index, to_index, count);
+}
+
+static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
+ .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_acl_ctcam_region_parman_resize,
+ .move = mlxsw_sp_acl_ctcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ cregion->region = region;
+ cregion->ops = ops;
+ cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
+ cregion);
+ if (!cregion->parman)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ parman_destroy(cregion->parman);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority)
+{
+ parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
+{
+ parman_prio_fini(&cchunk->parman_prio);
+}
+
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ int err;
+
+ err = parman_item_add(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry,
+ rulei, fillup_priority);
+ if (err)
+ goto err_rule_insert;
+ return 0;
+
+err_rule_insert:
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ return err;
+}
+
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry);
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
new file mode 100644
index 000000000000..0a4fd3c8662a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
+#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100
+#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16
+
+struct mlxsw_sp_acl_erp_core {
+ unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
+ struct gen_pool *erp_tables;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int num_erp_banks;
+};
+
+struct mlxsw_sp_acl_erp_key {
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+ bool ctcam;
+};
+
+struct mlxsw_sp_acl_erp {
+ struct mlxsw_sp_acl_erp_key key;
+ u8 id;
+ u8 index;
+ refcount_t refcnt;
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+};
+
+struct mlxsw_sp_acl_erp_master_mask {
+ DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN];
+};
+
+struct mlxsw_sp_acl_erp_table {
+ struct mlxsw_sp_acl_erp_master_mask master_mask;
+ DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ struct list_head atcam_erps_list;
+ struct rhashtable erp_ht;
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ const struct mlxsw_sp_acl_erp_table_ops *ops;
+ unsigned long base_index;
+ unsigned int num_atcam_erps;
+ unsigned int num_max_atcam_erps;
+ unsigned int num_ctcam_erps;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+};
+
+struct mlxsw_sp_acl_erp_table_ops {
+ struct mlxsw_sp_acl_erp *
+ (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+ void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+};
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+
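+/* The eRP table behaves differently depending on how many masks are in
+ * use: each state (no, single, two, multiple masks) has its own
+ * create/destroy ops, and the table switches between them as eRPs are
+ * added and removed.
+ */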
+static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_second_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_first_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
+};
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->key.ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->id;
+}
+
+static unsigned int
+mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+
+ return erp_core->erpt_entries_size[aregion->type];
+}
+
+static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_id)
+{
+ u8 id;
+
+ id = find_first_zero_bit(erp_table->erp_id_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) {
+ __set_bit(id, erp_table->erp_id_bitmap);
+ *p_id = id;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 id)
+{
+ __clear_bit(id, erp_table->erp_id_bitmap);
+}
+
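+/* The master mask is the bitwise OR of all the eRP masks used by the
+ * region. Each bit is reference counted, so it is only cleared from the
+ * mask when the last eRP using it is removed.
+ */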
+static void
+mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (mask->count[bit]++ == 0)
+ __set_bit(bit, mask->bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (--mask->count[bit] == 0)
+ __clear_bit(bit, mask->bitmap);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+ char *master_mask;
+
+ mlxsw_reg_percr_pack(percr_pl, region->id);
+ master_mask = mlxsw_reg_percr_master_mask_data(percr_pl);
+ bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id);
+ if (err)
+ goto err_erp_id_get;
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ list_add(&erp->list, &erp_table->atcam_erps_list);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_atcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return erp;
+
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+err_erp_id_get:
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+ kfree(erp);
+}
+
+static int
+mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long *p_index)
+{
+ unsigned int num_rows, entry_size;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+ return -EINVAL;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
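+ /* The pool is based at a non-zero offset, so a zero return from
+ * gen_pool_alloc() unambiguously means failure. Subtract the
+ * offset to obtain the actual table index.
+ */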
+ *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (*p_index == 0)
+ return -ENOBUFS;
+ *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long index)
+{
+ unsigned long base_index;
+ unsigned int entry_size;
+ size_t size;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ size = num_erps / erp_core->num_erp_banks * entry_size;
+ gen_pool_free(erp_core->erp_tables, base_index, size);
+}
+
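+/* The master RP is the sole RP used by the region before it transitions
+ * to an eRP table. It only exists while exactly one A-TCAM eRP is on
+ * the list.
+ */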
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ if (!list_is_singular(&erp_table->atcam_erps_list))
+ return NULL;
+
+ return list_first_entry(&erp_table->atcam_erps_list,
+ struct mlxsw_sp_acl_erp, list);
+}
+
+static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_index)
+{
+ u8 index;
+
+ index = find_first_zero_bit(erp_table->erp_index_bitmap,
+ erp_table->num_max_atcam_erps);
+ if (index < erp_table->num_max_atcam_erps) {
+ __set_bit(index, erp_table->erp_index_bitmap);
+ *p_index = index;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 index)
+{
+ __clear_bit(index, erp_table->erp_index_bitmap);
+}
+
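+/* eRP indexes are striped across the eRP table banks: the bank is the
+ * index modulo the number of banks and the row within the bank is the
+ * index divided by it.
+ */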
+static void
+mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp,
+ u8 *p_erpt_bank, u8 *p_erpt_index)
+{
+ unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table);
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned int row;
+
+ *p_erpt_bank = erp->index % erp_core->num_erp_banks;
+ row = erp->index / erp_core->num_erp_banks;
+ *p_erpt_index = erp_table->base_index + row * entry_size;
+}
+
+static int
+mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index,
+ erp->key.mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index, empty_mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
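+/* Point the region at its eRP table via the PERERP register. When
+ * ctcam_le is set, a lookup in the C-TCAM is chained as well, for
+ * eRPs that were spilled there.
+ */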
+static int
+mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table,
+ bool ctcam_le)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void
+mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ /* It is possible we do not have a master RP when we disable the
+ * table, as is the case when there are no rules in the A-TCAM
+ * and the last C-TCAM rule is deleted
+ */
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0,
+ master_rp ? master_rp->id : 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ list_for_each_entry(erp, &erp_table->atcam_erps_list, list) {
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+ }
+
+ return 0;
+
+err_table_erp_add:
+ list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list,
+ list)
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ return err;
+}
+
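+/* Grow the eRP table by one row - one eRP per bank - once all currently
+ * allocated eRPs are in use. The existing eRPs are relocated to the new
+ * table before the old one is freed.
+ */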
+static int
+mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned long old_base_index = erp_table->base_index;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ int err;
+
+ if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps)
+ return 0;
+
+ if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return -ENOBUFS;
+
+ num_erps = old_num_erps + erp_core->num_erp_banks;
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = num_erps;
+
+ err = mlxsw_sp_acl_erp_table_relocate(erp_table);
+ if (err)
+ goto err_table_relocate;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le);
+ if (err)
+ goto err_table_enable;
+
+ mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps,
+ erp_table->aregion->type, old_base_index);
+
+ return 0;
+
+err_table_enable:
+err_table_relocate:
+ erp_table->num_max_atcam_erps = old_num_erps;
+ mlxsw_sp_acl_erp_table_free(erp_core, num_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->base_index = old_base_index;
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+ int err;
+
+ /* Initially, allocate a single eRP row. Expand later as needed */
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = erp_core->num_erp_banks;
+
+ /* Transition the sole RP currently configured (the master RP)
+ * to the eRP table
+ */
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp) {
+ err = -EINVAL;
+ goto err_table_master_rp;
+ }
+
+ /* Maintain the same eRP bank for the master RP, so that we
+ * do not need to update the Bloom filter
+ */
+ master_rp->index = master_rp->index % erp_core->num_erp_banks;
+ __set_bit(master_rp->index, erp_table->erp_index_bitmap);
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
+ if (err)
+ goto err_table_master_rp_add;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
+ if (err)
+ goto err_table_enable;
+
+ return 0;
+
+err_table_enable:
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+err_table_master_rp_add:
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+err_table_master_rp:
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp)
+ return;
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+}
+
+static int
+mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* No need to re-enable lookup in the C-TCAM */
+ if (erp_table->num_ctcam_erps > 1)
+ return 0;
+
+ return mlxsw_sp_acl_erp_table_enable(erp_table, true);
+}
+
+static void
+mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* Only disable C-TCAM lookup when last C-TCAM eRP is deleted */
+ if (erp_table->num_ctcam_erps > 1)
+ return;
+
+ mlxsw_sp_acl_erp_table_enable(erp_table, false);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ switch (erp_table->num_atcam_erps) {
+ case 2:
+ /* Keep using the eRP table, but correctly set the
+ * operations pointer so that when an A-TCAM eRP is
+ * deleted we will transition to use the master mask
+ */
+ erp_table->ops = &erp_two_masks_ops;
+ break;
+ case 1:
+ /* We only kept the eRP table because we had C-TCAM
+ * eRPs in use. Now that the last C-TCAM eRP is gone we
+ * can stop using the table and transition to use the
+ * master mask
+ */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ erp_table->ops = &erp_single_mask_ops;
+ break;
+ case 0:
+ /* There are no more eRPs of any kind used by the region,
+ * so free its eRP table and transition to the initial state
+ */
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ mlxsw_sp_acl_erp_table_free(erp_table->erp_core,
+ erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->ops = &erp_no_mask_ops;
+ break;
+ default:
+ break;
+ }
+}
+
+static struct mlxsw_sp_acl_erp *
+__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_ctcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
+ if (err)
+ goto err_erp_region_ctcam_enable;
+
+ /* When C-TCAM is used, the eRP table must be used */
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_erp_region_ctcam_enable:
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ /* There is a special situation where we need to spill rules
+ * into the C-TCAM, yet the region is still using a master
+ * mask and thus not performing a lookup in the C-TCAM. This
+ * can happen when two rules that differ only in priority - and
+ * thus share the same key - are programmed. In this case
+ * we transition the region to use an eRP table
+ */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ return erp;
+
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+
+ /* Once the last C-TCAM eRP has been destroyed, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use
+ */
+ if (erp_table->num_ctcam_erps > 0)
+ return;
+ mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Expand the eRP table for the new eRP, if needed */
+ err = mlxsw_sp_acl_erp_table_expand(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ erp_table->ops = &erp_two_masks_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Transition to use eRP table instead of master mask */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_two_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ /* Transition to use master mask instead of eRP table */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+
+ erp_table->ops = &erp_single_mask_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+
+ if (key->ctcam)
+ return ERR_PTR(-EINVAL);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ erp_table->ops = &erp_single_mask_ops;
+
+ return erp;
+}
+
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ erp_table->ops = &erp_no_mask_ops;
+}
+
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ WARN_ON(1);
+}
+
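+/* Look up an eRP matching the given mask and take a reference to it.
+ * If no such eRP exists, create one through the current table ops.
+ */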
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key key;
+ struct mlxsw_sp_acl_erp *erp;
+
+ /* eRPs are allocated from a shared resource, but currently all
+ * allocations are done under RTNL.
+ */
+ ASSERT_RTNL();
+
+ memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+ key.ctcam = ctcam;
+ erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
+ mlxsw_sp_acl_erp_ht_params);
+ if (erp) {
+ refcount_inc(&erp->refcnt);
+ return erp;
+ }
+
+ return erp_table->ops->erp_create(erp_table, &key);
+}
+
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ ASSERT_RTNL();
+
+ if (!refcount_dec_and_test(&erp->refcnt))
+ return;
+
+ erp_table->ops->erp_destroy(erp_table, erp);
+}
+
+static struct mlxsw_sp_acl_erp_table *
+mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL);
+ if (!erp_table)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ erp_table->erp_core = aregion->atcam->erp_core;
+ erp_table->ops = &erp_no_mask_ops;
+ INIT_LIST_HEAD(&erp_table->atcam_erps_list);
+ erp_table->aregion = aregion;
+
+ return erp_table;
+
+err_rhashtable_init:
+ kfree(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ WARN_ON(!list_empty(&erp_table->atcam_erps_list));
+ rhashtable_destroy(&erp_table->erp_ht);
+ kfree(erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+
+ mlxsw_reg_percr_pack(percr_pl, aregion->region->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0,
+ 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = mlxsw_sp_acl_erp_table_create(aregion);
+ if (IS_ERR(erp_table))
+ return PTR_ERR(erp_table);
+ aregion->erp_table = erp_table;
+
+ /* Initialize the region's master mask to all zeroes */
+ err = mlxsw_sp_acl_erp_master_mask_init(aregion);
+ if (err)
+ goto err_erp_master_mask_init;
+
+ /* Initialize the region to not use the eRP table */
+ err = mlxsw_sp_acl_erp_region_param_init(aregion);
+ if (err)
+ goto err_erp_region_param_init;
+
+ return 0;
+
+err_erp_region_param_init:
+err_erp_master_mask_init:
+ mlxsw_sp_acl_erp_table_destroy(erp_table);
+ return err;
+}
+
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_erp_table_destroy(aregion->erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB))
+ return -EIO;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int erpt_bank_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS))
+ return -EIO;
+ erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANK_SIZE);
+ erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANKS);
+
+ erp_core->erp_tables = gen_pool_create(0, -1);
+ if (!erp_core->erp_tables)
+ return -ENOMEM;
+ gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL);
+
+ err = gen_pool_add(erp_core->erp_tables,
+ MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size,
+ -1);
+ if (err)
+ goto err_gen_pool_add;
+
+ /* Different region types use masks of different sizes */
+ err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_sizes_query;
+
+ return 0;
+
+err_erp_tables_sizes_query:
+err_gen_pool_add:
+ gen_pool_destroy(erp_core->erp_tables);
+ return err;
+}
+
+static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ gen_pool_destroy(erp_core->erp_tables);
+}
+
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ int err;
+
+ erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL);
+ if (!erp_core)
+ return -ENOMEM;
+ erp_core->mlxsw_sp = mlxsw_sp;
+ atcam->erp_core = erp_core;
+
+ err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_init;
+
+ return 0;
+
+err_erp_tables_init:
+ kfree(erp_core);
+ return err;
+}
+
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core);
+ kfree(atcam->erp_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 510ce48d87f7..e47d1d286e93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,46 +1,12 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include "spectrum_acl_flex_actions.h"
#include "core_acl_flex_actions.h"
#include "spectrum_span.h"
-#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
-
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
- char *enc_actions, bool is_first)
+ char *enc_actions, bool is_first, bool ca)
{
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
@@ -53,11 +19,11 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
- &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, &kvdl_index);
if (err)
return err;
- mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
goto err_pefa_write;
@@ -65,10 +31,25 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
return 0;
err_pefa_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
return err;
}
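+
+/* Spectrum-1 and Spectrum-2 share the KVDL action set logic; only
+ * Spectrum-2 sets the PEFA "ca" bit and supports reading back action
+ * set activity (Spectrum-1 returns -EOPNOTSUPP for activity queries).
+ */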
+static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, false);
+}
+
+static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, true);
+}
+
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
bool is_first)
{
@@ -76,7 +57,29 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
if (is_first)
return;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
+}
+
+static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ int err;
+
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_unpack(pefa_pl, activity);
+ return 0;
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
@@ -87,7 +90,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
u32 kvdl_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, &kvdl_index);
if (err)
return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
@@ -98,7 +102,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
return 0;
err_ppbs_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
return err;
}
@@ -106,7 +111,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
struct mlxsw_sp *mlxsw_sp = priv;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
}
static int
@@ -154,22 +160,36 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
- .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
+};
+
+const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
.counter_index_get = mlxsw_sp_act_counter_index_get,
.counter_index_put = mlxsw_sp_act_counter_index_put,
.mirror_add = mlxsw_sp_act_mirror_add,
.mirror_del = mlxsw_sp_act_mirror_del,
+ .dummy_first_set = true,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_ACTIONS_PER_SET),
- &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ mlxsw_sp->afa_ops, mlxsw_sp);
return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
index bd6d552d95b9..fe436d816d0c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
new file mode 100644
index 000000000000..d409b09ba8df
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "spectrum.h"
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+ MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+ MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
+ MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+ MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+ MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+ MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+ MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+ MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
+
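+/* In Spectrum-1 the key blocks are laid out back-to-back, 16 bytes
+ * each, so encoding a block is a plain copy at the block's offset.
+ */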
+static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+ .blocks = mlxsw_sp1_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
+ .encode_block = mlxsw_sp1_afk_encode_block,
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+};
+
+static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36
+
+/* A block in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32|
+ * +-----------------------------------------------------------------------------------------------+
+ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ */
+MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
+
+/* The key / mask block layout in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | block11_high |
+ * +-----------------------------------------------------------------------------------------------+
+ * | block11_low | block10_high |
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * ...
+ */
+
+struct mlxsw_sp2_afk_block_layout {
+ unsigned short offset;
+ struct mlxsw_item item;
+};
+
+#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \
+ { \
+ .offset = _offset, \
+ { \
+ .shift = _shift, \
+ .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \
+ .name = #_block, \
+ } \
+ }
+
+static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
+};
+
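+/* Encode a 36-bit block value into the key buffer according to the
+ * block's slot in the layout table. Blocks are packed back-to-back,
+ * so each slot has both a byte offset and a bit shift.
+ */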
+static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+ const struct mlxsw_sp2_afk_block_layout *block_layout;
+
+ if (WARN_ON(block_index < 0 ||
+ block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout)))
+ return;
+
+ block_layout = &mlxsw_sp2_afk_blocks_layout[block_index];
+ __mlxsw_item_set64(output + block_layout->offset,
+ &block_layout->item, 0, block_value);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
+ .blocks = mlxsw_sp2_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
deleted file mode 100644
index fb8031828454..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-
-#include "core_acl_flex_keys.h"
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
- MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
- MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
- MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
-};
-
-static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
- MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
- MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
- MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
- MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
- MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
- MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
- MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
- MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
- MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
- MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
- MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
- MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
-};
-
-#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index ad1b548e3cac..e171513bb32a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -39,25 +8,25 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
- unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
- unsigned int max_groups;
- unsigned int max_group_size;
-};
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->priv_size;
+}
-static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
@@ -88,21 +57,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
return 0;
+err_tcam_init:
+ kfree(tcam->used_groups);
err_alloc_used_groups:
kfree(tcam->used_regions);
return err;
}
-static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
}
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority)
+{
+ u64 max_priority;
+
+ if (!fillup_priority) {
+ *priority = 0;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+ return -EIO;
+
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
+ if (rulei->priority > max_priority)
+ return -EINVAL;
+
+ /* Unlike in TC, in HW, higher number means higher priority. */
+ *priority = max_priority - rulei->priority;
+ return 0;
+}
+
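The helper above flips tc priorities for the hardware. A minimal userspace sketch of that inversion, using a made-up max_priority of 16 rather than the KVD_SIZE resource the driver actually queries:

/* Illustration of the tc-to-hardware priority inversion performed by
 * mlxsw_sp_acl_tcam_priority_get(): the max_priority value of 16 is
 * invented for the example, not a device constant.
 */
#include <stdio.h>

static unsigned int hw_priority(unsigned int max_priority,
				unsigned int tc_priority)
{
	/* In TC, lower numbers match first; in this hardware, higher
	 * numbers win, so the ordering is flipped by subtraction.
	 */
	return max_priority - tc_priority;
}

int main(void)
{
	printf("tc prio 1 -> hw prio %u\n", hw_priority(16, 1));
	printf("tc prio 5 -> hw prio %u\n", hw_priority(16, 5));
	return 0;
}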
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
@@ -157,37 +158,25 @@ struct mlxsw_sp_acl_tcam_group {
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
-};
-
-struct mlxsw_sp_acl_tcam_region {
- struct list_head list; /* Member of a TCAM group */
- struct list_head chunk_list; /* List of chunks under this region */
- struct parman *parman;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_tcam_group *group;
- u16 id; /* ACL ID and region ID - they are same */
- char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
- struct mlxsw_afk_key_info *key_info;
- struct {
- struct parman_prio parman_prio;
- struct parman_item parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- } catchall;
+ bool tmplt_elusage_set;
+ struct mlxsw_afk_element_usage tmplt_elusage;
};
struct mlxsw_sp_acl_tcam_chunk {
struct list_head list; /* Member of a TCAM region */
struct rhash_head ht_node; /* Member of a chunk HT */
unsigned int priority; /* Priority within the region and group */
- struct parman_prio parman_prio;
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_sp_acl_tcam_region *region;
unsigned int ref_count;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
};
struct mlxsw_sp_acl_tcam_entry {
- struct parman_item parman_item;
struct mlxsw_sp_acl_tcam_chunk *chunk;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
};
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
@@ -216,13 +205,19 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
- unsigned int patterns_count)
+ unsigned int patterns_count,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
int err;
group->tcam = tcam;
group->patterns = patterns;
group->patterns_count = patterns_count;
+ if (tmplt_elusage) {
+ group->tmplt_elusage_set = true;
+ memcpy(&group->tmplt_elusage, tmplt_elusage,
+ sizeof(group->tmplt_elusage));
+ }
INIT_LIST_HEAD(&group->region_list);
err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
if (err)
@@ -431,6 +426,15 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *pattern;
int i;
+ /* In case the template is set, we don't have to look up the pattern;
+ * we just use the template.
+ */
+ if (group->tmplt_elusage_set) {
+ memcpy(out, &group->tmplt_elusage, sizeof(*out));
+ WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
+ return;
+ }
+
for (i = 0; i < group->patterns_count; i++) {
pattern = &group->patterns[i];
mlxsw_afk_element_usage_fill(out, pattern->elements,
@@ -441,9 +445,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
memcpy(out, elusage, sizeof(*out));
}
-#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
-
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
@@ -455,6 +456,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ region->key_type,
MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
region->id, region->tcam_region_info);
encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
@@ -477,24 +479,13 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
{
char ptar_pl[MLXSW_REG_PTAR_LEN];
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
+ region->key_type, 0, region->id,
region->tcam_region_info);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
-mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 new_size)
-{
- char ptar_pl[MLXSW_REG_PTAR_LEN];
-
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
- new_size, region->id, region->tcam_region_info);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
-}
-
-static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
@@ -516,193 +507,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
-static int
-mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- char *act_set;
- char *mask;
- char *key;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
- mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
-
- /* Only the first action set belongs here, the rest is in KVD */
- act_set = mlxsw_afa_block_first_set(rulei->act_block);
- mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-
- mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static int
-mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- bool *activity)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- int err;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
- region->tcam_region_info, offset);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
- if (err)
- return err;
- *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
- return 0;
-}
-
-#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
-
-static int
-mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- int err;
-
- parman_prio_init(region->parman, parman_prio,
- MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
- err = parman_item_add(region->parman, parman_prio, parman_item);
- if (err)
- goto err_parman_item_add;
-
- rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
- if (IS_ERR(rulei)) {
- err = PTR_ERR(rulei);
- goto err_rulei_create;
- }
-
- err = mlxsw_sp_acl_rulei_act_continue(rulei);
- if (WARN_ON(err))
- goto err_rulei_act_continue;
-
- err = mlxsw_sp_acl_rulei_commit(rulei);
- if (err)
- goto err_rulei_commit;
-
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- parman_item->index, rulei);
- region->catchall.rulei = rulei;
- if (err)
- goto err_rule_insert;
-
- return 0;
-
-err_rule_insert:
-err_rulei_commit:
-err_rulei_act_continue:
- mlxsw_sp_acl_rulei_destroy(rulei);
-err_rulei_create:
- parman_item_remove(region->parman, parman_prio, parman_item);
-err_parman_item_add:
- parman_prio_fini(parman_prio);
- return err;
-}
-
-static void
-mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
-
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- parman_item->index);
- mlxsw_sp_acl_rulei_destroy(rulei);
- parman_item_remove(region->parman, parman_prio, parman_item);
- parman_prio_fini(parman_prio);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 src_offset, u16 dst_offset, u16 size)
-{
- char prcr_pl[MLXSW_REG_PRCR_LEN];
-
- mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
- region->tcam_region_info, src_offset,
- region->tcam_region_info, dst_offset, size);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
-}
-
-static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
-}
-
-static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
-
- mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
- from_index, to_index, count);
-}
-
-static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_acl_tcam_region_parman_resize,
- .move = mlxsw_sp_acl_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam_region *region;
int err;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
if (!region)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&region->chunk_list);
region->mlxsw_sp = mlxsw_sp;
- region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
- region);
- if (!region->parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
-
region->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(region->key_info)) {
err = PTR_ERR(region->key_info);
@@ -713,6 +533,11 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_region_id_get;
+ err = ops->region_associate(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_associate;
+
+ region->key_type = ops->key_type;
err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
if (err)
goto err_tcam_region_alloc;
@@ -721,23 +546,22 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
- err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+ err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
if (err)
- goto err_tcam_region_catchall_add;
+ goto err_tcam_region_init;
return region;
-err_tcam_region_catchall_add:
+err_tcam_region_init:
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
+err_tcam_region_associate:
mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
- parman_destroy(region->parman);
-err_parman_create:
kfree(region);
return ERR_PTR(err);
}
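The reworked error path above keeps the usual goto-unwind ladder: each label undoes only the steps that already succeeded, in reverse order. A stripped-down, runnable sketch of the idiom, with hypothetical stand-ins for the real setup steps:

/* Generic goto-unwind ladder, as used in
 * mlxsw_sp_acl_tcam_region_create() above. alloc_key_info and
 * enable_region are invented placeholders, not driver functions.
 */
#include <stdio.h>

static int alloc_key_info(void) { puts("key info allocated"); return 0; }
static void free_key_info(void) { puts("key info freed"); }
static int enable_region(void) { puts("enable failed"); return -1; }

static int region_setup(void)
{
	int err;

	err = alloc_key_info();
	if (err)
		return err;

	err = enable_region();
	if (err)
		goto err_enable;	/* undo in reverse order */

	return 0;

err_enable:
	free_key_info();
	return err;
}

int main(void)
{
	return region_setup() ? 1 : 0;
}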
@@ -746,12 +570,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
- mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
mlxsw_afk_key_info_put(region->key_info);
- parman_destroy(region->parman);
kfree(region);
}
@@ -826,13 +651,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
return ERR_PTR(-EINVAL);
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
if (!chunk)
return ERR_PTR(-ENOMEM);
chunk->priority = priority;
@@ -844,7 +670,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_chunk_assoc;
- parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+ ops->chunk_init(chunk->region->priv, chunk->priv, priority);
err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
@@ -854,7 +680,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
return chunk;
err_rhashtable_insert:
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
kfree(chunk);
@@ -865,11 +691,12 @@ static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *chunk)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_group *group = chunk->group;
rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
kfree(chunk);
}
@@ -903,11 +730,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}
+static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->entry_priv_size;
+}
+
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
struct mlxsw_sp_acl_tcam_region *region;
int err;
@@ -918,24 +753,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(chunk);
region = chunk->region;
- err = parman_item_add(region->parman, &chunk->parman_prio,
- &entry->parman_item);
- if (err)
- goto err_parman_item_add;
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- entry->parman_item.index,
- rulei);
+ err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
+ entry->priv, rulei);
if (err)
- goto err_rule_insert;
+ goto err_entry_add;
entry->chunk = chunk;
return 0;
-err_rule_insert:
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
-err_parman_item_add:
+err_entry_add:
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
return err;
}
@@ -943,13 +770,11 @@ err_parman_item_add:
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- entry->parman_item.index);
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
+ ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
@@ -958,22 +783,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
- entry->parman_item.index,
- activity);
+ return ops->entry_activity_get(mlxsw_sp, region->priv,
+ entry->priv, activity);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -987,10 +814,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
@@ -1019,14 +850,16 @@ struct mlxsw_sp_acl_tcam_flower_rule {
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv)
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- struct mlxsw_sp_acl_tcam *tcam = priv;
return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
- MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage);
}
static void
@@ -1070,6 +903,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
+static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
+ mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
@@ -1107,7 +946,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
- .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+ .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
@@ -1118,7 +957,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
-static const struct mlxsw_sp_acl_profile_ops *
+const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile)
{
@@ -1131,10 +970,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
return NULL;
return ops;
}
-
-const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
- .priv_size = sizeof(struct mlxsw_sp_acl_tcam),
- .init = mlxsw_sp_acl_tcam_init,
- .fini = mlxsw_sp_acl_tcam_fini,
- .profile_ops = mlxsw_sp_acl_tcam_profile_ops,
-};
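With the ops vtable removed from this file, the generic TCAM objects now end in a backend-sized private area (the priv[0] members allocated with ops->region_priv_size and friends above). A self-contained sketch of that allocation pattern, using illustrative names rather than driver types:

/* Variable-size private-area pattern the patch introduces for regions,
 * chunks and entries: the generic struct ends in a zero-length array
 * (a GCC extension), and the allocation adds the backend-specific size
 * on top. struct frame and frame_ops are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame_ops {
	size_t priv_size;
};

struct frame {
	int id;
	unsigned long priv[0];	/* backend data lives past the end */
};

static struct frame *frame_create(const struct frame_ops *ops)
{
	return calloc(1, sizeof(struct frame) + ops->priv_size);
}

int main(void)
{
	struct frame_ops ops = { .priv_size = 64 };
	struct frame *f = frame_create(&ops);

	if (!f)
		return 1;
	memset(f->priv, 0xab, ops.priv_size);	/* backend scribbles here */
	printf("frame %p, priv at %p\n", (void *)f, (void *)f->priv);
	free(f);
	return 0;
}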
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
new file mode 100644
index 000000000000..219a4e26c332
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
+#define _MLXSW_SPECTRUM_ACL_TCAM_H
+
+#include <linux/list.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+ unsigned long *used_regions; /* bit array */
+ unsigned int max_regions;
+ unsigned long *used_groups; /* bit array */
+ unsigned int max_groups;
+ unsigned int max_group_size;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
+};
+
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority);
+
+struct mlxsw_sp_acl_profile_ops {
+ size_t ruleset_priv_size;
+ int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
+ void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ u16 (*ruleset_group_id)(void *ruleset_priv);
+ size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
+ int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile);
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
+#define MLXSW_SP_ACL_TCAM_MASK_LEN \
+ (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
+
+struct mlxsw_sp_acl_tcam_group;
+
+struct mlxsw_sp_acl_tcam_region {
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head chunk_list; /* List of chunks under this region */
+ struct mlxsw_sp_acl_tcam_group *group;
+ enum mlxsw_reg_ptar_key_type key_type;
+ u16 id; /* ACL ID and region ID - they are same */
+ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
+};
+
+struct mlxsw_sp_acl_ctcam_region {
+ struct parman *parman;
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp_acl_ctcam_chunk {
+ struct parman_prio parman_prio;
+};
+
+struct mlxsw_sp_acl_ctcam_entry {
+ struct parman_item parman_item;
+};
+
+struct mlxsw_sp_acl_ctcam_region_ops {
+ int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask);
+ void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority);
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority);
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+static inline unsigned int
+mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return centry->parman_item.index;
+}
+
+enum mlxsw_sp_acl_atcam_region_type {
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB,
+ __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX,
+};
+
+#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \
+ (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1)
+
+struct mlxsw_sp_acl_atcam {
+ struct mlxsw_sp_acl_erp_core *erp_core;
+};
+
+struct mlxsw_sp_acl_atcam_region {
+ struct rhashtable entries_ht; /* A-TCAM only */
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ const struct mlxsw_sp_acl_atcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_atcam *atcam;
+ enum mlxsw_sp_acl_atcam_region_type type;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ void *priv;
+};
+
+struct mlxsw_sp_acl_atcam_entry_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp_acl_atcam_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ struct mlxsw_sp_acl_erp *erp;
+};
+
+static inline struct mlxsw_sp_acl_atcam_region *
+mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion);
+}
+
+static inline struct mlxsw_sp_acl_atcam_entry *
+mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry);
+}
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id);
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority);
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk);
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+struct mlxsw_sp_acl_erp;
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp);
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+#endif
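The new header layers the A-TCAM structures over the C-TCAM ones by embedding, and mlxsw_sp_acl_tcam_cregion_aregion() / mlxsw_sp_acl_tcam_centry_aentry() recover the outer object with container_of. A minimal userspace sketch of that recovery, with a simplified container_of (the kernel macro adds type checking) and illustrative struct names:

/* container_of layering as in the header above: the outer object
 * embeds the inner one, and a pointer to the inner member is turned
 * back into a pointer to the outer struct.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {
	int index;
};

struct outer {
	int id;
	struct inner in;	/* embedded, like centry inside an aentry */
};

int main(void)
{
	struct outer o = { .id = 7, .in = { .index = 3 } };
	struct inner *ip = &o.in;
	struct outer *op = container_of(ip, struct outer, in);

	printf("recovered outer id %d from inner index %d\n",
	       op->id, ip->index);
	return 0;
}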
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 0a9adc5962fb..4327487553c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 0f46775e0307..83c2e1e5f216 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index fd34d0a01073..81465e267b10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkdis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_CNT_H
#define _MLXSW_SPECTRUM_CNT_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b6ed7f7c531e..b25048c6c761 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/netdevice.h>
#include <linux/string.h>
@@ -255,6 +224,270 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
+ struct dcb_app *app)
+{
+ int prio;
+
+ if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "APP entry with priority value %u is invalid\n",
+ app->priority);
+ return -EINVAL;
+ }
+
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= 64) {
+ netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ /* Warn about any DSCP APP entries with the same protocol ID. */
+ prio = fls(dcb_ieee_getapp_mask(dev, app));
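+ /* fls() returns 0 when no APP entry exists; otherwise prio--
+ * converts the bit position into the highest configured priority.
+ */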
+ if (prio--) {
+ if (prio < app->priority)
+ netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
+ app->priority, app->protocol, prio);
+ else if (prio > app->priority)
+ netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
+ app->priority, app->protocol, prio);
+ }
+ break;
+
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol) {
+ netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ netdev_err(dev, "APP entries with selector %u not supported\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8
+mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u8 prio_mask;
+
+ prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev);
+ if (prio_mask)
+ /* Take the highest configured priority. */
+ return fls(prio_mask) - 1;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ int i;
+
+ dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i])
+ map->map[i] = fls(map->map[i]) - 1;
+ else
+ map->map[i] = default_prio;
+ }
+}
+
+static bool
+mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ bool have_dscp = false;
+ int i;
+
+ dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i]) {
+ map->map[i] = fls64(map->map[i]) - 1;
+ have_dscp = true;
+ }
+ }
+
+ return have_dscp;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpts_pl[MLXSW_REG_QPTS_LEN];
+
+ mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool rewrite_dscp)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qrwe_pl[MLXSW_REG_QRWE_LEN];
+
+ mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port,
+ false, rewrite_dscp);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP;
+ int err;
+
+ if (mlxsw_sp_port->dcb.trust_state == ts)
+ return 0;
+
+ err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp);
+ if (err)
+ goto err_update_qrwe;
+
+ mlxsw_sp_port->dcb.trust_state = ts;
+ return 0;
+
+err_update_qrwe:
+ mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port,
+ mlxsw_sp_port->dcb.trust_state);
+ return err;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
+}
+
+static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct dcb_ieee_app_prio_map prio_map;
+ struct dcb_ieee_app_dscp_map dscp_map;
+ u8 default_prio;
+ bool have_dscp;
+ int err;
+
+ default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
+ &prio_map);
+
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
+ mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
+ &dscp_map);
+ err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
+ &dscp_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port,
+ &prio_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP);
+ if (err) {
+ /* A failure to set trust DSCP means that the QPDPM and QPDSM
+ * maps installed above are not in effect. Since we are here
+ * attempting to set trust DSCP, we could not have attempted to
+ * switch trust to PCP, so no cleanup is necessary.
+ */
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_dcbnl_app_validate(dev, app);
+ if (err)
+ return err;
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ goto err_update;
+
+ return 0;
+
+err_update:
+ dcb_ieee_delapp(dev, app);
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to update DCB APP configuration\n");
+ return 0;
+}
+
static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
@@ -394,6 +627,8 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
.ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp,
+ .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp,
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
@@ -467,6 +702,7 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
goto err_port_pfc_init;
+ mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP;
mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
return 0;
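A standalone sketch of the DSCP-to-priority resolution that mlxsw_sp_port_dcb_app_dscp_prio_map() performs above: each DSCP value carries a bitmask of candidate priorities, the highest set bit wins, and empty masks fall back to the port default. fls() is reimplemented and the mask values are invented so the example runs in userspace:

/* DSCP-to-priority resolution: highest set bit in the per-DSCP
 * priority mask wins; empty masks take the default priority.
 */
#include <stdio.h>

static int fls_u8(unsigned char mask)
{
	int bit = 0;

	while (mask) {
		bit++;
		mask >>= 1;
	}
	return bit;	/* 0 when no bit is set, like kernel fls() */
}

int main(void)
{
	unsigned char masks[4] = { 0x00, 0x02, 0x81, 0x10 };
	unsigned char default_prio = 3;
	int i;

	for (i = 0; i < 4; i++) {
		int prio = masks[i] ? fls_u8(masks[i]) - 1 : default_prio;

		printf("dscp %d -> prio %d\n", i, prio);
	}
	return 0;
}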
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index f56fa18d6b26..41e607a14846 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arakdis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index 815d543cf114..e689576231ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PIPELINE_H_
#define _MLXSW_PIPELINE_H_
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 54262af4e98f..715d24ff937e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 89dbf569dff5..ebd1b24ebaa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -48,7 +17,8 @@
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tcf_exts *exts)
+ struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -58,7 +28,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return 0;
/* Count action is inserted first */
- err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
if (err)
return err;
@@ -66,16 +36,22 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_ok(a)) {
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
return err;
+ }
} else if (is_tcf_gact_shot(a)) {
err = mlxsw_sp_acl_rulei_act_drop(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
+ }
} else if (is_tcf_gact_trap(a)) {
err = mlxsw_sp_acl_rulei_act_trap(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
return err;
+ }
} else if (is_tcf_gact_goto_chain(a)) {
u32 chain_index = tcf_gact_goto_chain_index(a);
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -89,8 +65,10 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
return err;
+ }
} else if (is_tcf_mirred_egress_redirect(a)) {
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
@@ -99,20 +77,21 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
- fid_index);
+ fid_index, extack);
if (err)
return err;
out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
- out_dev);
+ out_dev, extack);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
struct net_device *out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
- block, out_dev);
+ block, out_dev,
+ extack);
if (err)
return err;
} else if (is_tcf_vlan(a)) {
@@ -123,8 +102,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
action, vid,
- proto, prio);
+ proto, prio, extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
}
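
The hunk above threads a struct netlink_ext_ack through the action parser so each failure path attaches a human-readable message for user space instead of failing silently. A minimal userspace-compilable sketch of the same pattern follows; ext_ack_set and parse_one are hypothetical stand-ins for the kernel's NL_SET_ERR_MSG_MOD machinery and the per-action branches:

#include <stdio.h>

/* Hypothetical stand-in for struct netlink_ext_ack. */
struct ext_ack {
	const char *msg;
};

/* Stand-in for NL_SET_ERR_MSG_MOD: record the first failure message. */
static void ext_ack_set(struct ext_ack *extack, const char *msg)
{
	if (extack && !extack->msg)
		extack->msg = msg;
}

/* Mirrors the parser's shape: try an action, attach a message on error. */
static int parse_one(int ok, struct ext_ack *extack, const char *msg)
{
	if (!ok) {
		ext_ack_set(extack, msg);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ext_ack extack = { NULL };

	if (parse_one(0, &extack, "Cannot append drop action"))
		printf("error: %s\n", extack.msg);
	return 0;
}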
@@ -144,10 +124,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->mask);
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
- ntohl(key->src), ntohl(mask->src));
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
- ntohl(key->dst), ntohl(mask->dst));
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->src,
+ (char *) &mask->src, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->dst,
+ (char *) &mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
@@ -161,24 +143,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->mask);
- size_t addr_half_size = sizeof(key->src) / 2;
-
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- &key->src.s6_addr[0],
- &mask->src.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- &key->src.s6_addr[addr_half_size],
- &mask->src.s6_addr[addr_half_size],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
- &key->dst.s6_addr[0],
- &mask->dst.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
- &key->dst.s6_addr[addr_half_size],
- &mask->dst.s6_addr[addr_half_size],
- addr_half_size);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->src.s6_addr[0x0],
+ &mask->src.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->src.s6_addr[0x4],
+ &mask->src.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->src.s6_addr[0x8],
+ &mask->src.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->src.s6_addr[0xC],
+ &mask->src.s6_addr[0xC], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->dst.s6_addr[0x0],
+ &mask->dst.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->dst.s6_addr[0x4],
+ &mask->dst.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->dst.s6_addr[0x8],
+ &mask->dst.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->dst.s6_addr[0xC],
+ &mask->dst.s6_addr[0xC], 4);
}
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
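
The IPv6 hunk above replaces the old HI/LO address halves with four 32-bit key elements per address: bits 96-127 down to bits 0-31, taken at byte offsets 0x0, 0x4, 0x8 and 0xC of s6_addr. A small standalone sketch of that slicing, assuming only the standard in6_addr layout (the printf stands in for mlxsw_sp_acl_rulei_keymask_buf):

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr addr;
	int i;

	inet_pton(AF_INET6, "2001:db8::1", &addr);
	/* Four 4-byte slices: bits 96-127, 64-95, 32-63, 0-31. */
	for (i = 0; i < 4; i++) {
		const unsigned char *p = &addr.s6_addr[i * 4];

		printf("bits %3d-%3d: %02x%02x%02x%02x\n",
		       96 - i * 32, 127 - i * 32, p[0], p[1], p[2], p[3]);
	}
	return 0;
}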
@@ -192,6 +181,7 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
return -EINVAL;
}
@@ -220,6 +210,7 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
return 0;
if (ip_proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
return -EINVAL;
}
@@ -246,6 +237,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
return 0;
if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
return -EINVAL;
}
@@ -290,6 +282,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
}
@@ -340,13 +333,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->mask);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_DMAC,
- key->dst, mask->dst,
- sizeof(key->dst));
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ key->dst, mask->dst, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_SMAC,
- key->src, mask->src,
- sizeof(key->src));
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ key->dst + 2, mask->dst + 2, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ key->src, mask->src, 2);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ key->src + 2, mask->src + 2, 4);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -358,6 +355,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
+
+ if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
+ return -EOPNOTSUPP;
+ }
if (mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
@@ -387,7 +389,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
+ f->common.extack);
}
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -401,11 +404,12 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
- rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
+ f->common.extack);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
@@ -445,7 +449,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (IS_ERR(ruleset))
return;
@@ -471,7 +475,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
@@ -493,3 +497,40 @@ err_rule_get_stats:
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return err;
}
+
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule_info rulei;
+ int err;
+
+ memset(&rulei, 0, sizeof(rulei));
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
+ if (err)
+ return err;
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ &rulei.values.elusage);
+
+ /* keep the reference to the ruleset */
+ return PTR_ERR_OR_ZERO(ruleset);
+}
+
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (IS_ERR(ruleset))
+ return;
+ /* put the reference to the ruleset kept in create */
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
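
mlxsw_sp_flower_tmplt_create above deliberately keeps the reference taken by mlxsw_sp_acl_ruleset_get so the ruleset outlives individual rules, which is why mlxsw_sp_flower_tmplt_destroy puts two references: one for its own lookup and one for the reference held since create. A compact refcount sketch of that pairing, with ruleset_get/ruleset_put as hypothetical stand-ins:

#include <stdio.h>

struct ruleset {
	int refcount;
};

static struct ruleset tmpl = { 0 };

static struct ruleset *ruleset_get(void)
{
	tmpl.refcount++;		/* every lookup takes a reference */
	return &tmpl;
}

static void ruleset_put(struct ruleset *rs)
{
	if (--rs->refcount == 0)
		printf("ruleset destroyed\n");
}

int main(void)
{
	struct ruleset *rs;

	ruleset_get();			/* tmplt_create: keep the reference */
	rs = ruleset_get();		/* tmplt_destroy: look the ruleset up */
	ruleset_put(rs);		/* put the lookup reference */
	ruleset_put(rs);		/* put the reference kept in create */
	return 0;
}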
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 98d896c14b87..00db26c96bf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
- * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 6909d867bb59..bb5c4d4a5872 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
- * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_IPIP_H_
#define _MLXSW_IPIP_H_
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index fe4327f547d2..1e4cdee7bcd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -1,454 +1,75 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
#include "spectrum.h"
-#define MLXSW_SP_KVDL_SINGLE_BASE 0
-#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
-#define MLXSW_SP_KVDL_SINGLE_END \
- (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
-
-#define MLXSW_SP_KVDL_CHUNKS_BASE \
- (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
-#define MLXSW_SP_KVDL_CHUNKS_END \
- (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
- (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
- (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
- (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
-#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
-
-struct mlxsw_sp_kvdl_part_info {
- unsigned int part_index;
- unsigned int start_index;
- unsigned int end_index;
- unsigned int alloc_size;
- enum mlxsw_sp_resource_id resource_id;
-};
-
-enum mlxsw_sp_kvdl_part_id {
- MLXSW_SP_KVDL_PART_ID_SINGLE,
- MLXSW_SP_KVDL_PART_ID_CHUNKS,
- MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
-};
-
-#define MLXSW_SP_KVDL_PART_INFO(id) \
-[MLXSW_SP_KVDL_PART_ID_##id] = { \
- .start_index = MLXSW_SP_KVDL_##id##_BASE, \
- .end_index = MLXSW_SP_KVDL_##id##_END, \
- .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
- .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
-}
-
-static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
- MLXSW_SP_KVDL_PART_INFO(SINGLE),
- MLXSW_SP_KVDL_PART_INFO(CHUNKS),
- MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
-};
-
-#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
-
-struct mlxsw_sp_kvdl_part {
- struct mlxsw_sp_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
-};
-
struct mlxsw_sp_kvdl {
- struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
};
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
- unsigned int alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (alloc_size <= part->info.alloc_size &&
- (!min_part ||
- part->info.alloc_size <= min_part->info.alloc_size))
- min_part = part;
- }
-
- return min_part ?: ERR_PTR(-ENOBUFS);
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
-{
- struct mlxsw_sp_kvdl_part *part;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (kvdl_index >= part->info.start_index &&
- kvdl_index <= part->info.end_index)
- return part;
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static u32
-mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
- unsigned int entry_index)
-{
- return info->start_index + entry_index * info->alloc_size;
-}
-
-static unsigned int
-mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
- u32 kvdl_index)
-{
- return (kvdl_index - info->start_index) / info->alloc_size;
-}
-
-static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
- u32 *p_kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index, nr_entries;
-
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
- entry_index = find_first_zero_bit(part->usage, nr_entries);
- if (entry_index == nr_entries)
- return -ENOBUFS;
- __set_bit(entry_index, part->usage);
-
- *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
- u32 kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index;
-
- entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
- __clear_bit(entry_index, part->usage);
-}
-
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- /* Find partition with smallest allocation size satisfying the
- * requested size.
- */
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
-}
-
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
- if (IS_ERR(part))
- return;
- mlxsw_sp_kvdl_part_free(part, entry_index);
-}
-
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- *p_alloc_size = part->info.alloc_size;
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
- struct mlxsw_sp_kvdl_part *part_prev,
- unsigned int size)
-{
-
- if (!part_prev) {
- part->info.end_index = size - 1;
- } else {
- part->info.start_index = part_prev->info.end_index + 1;
- part->info.end_index = part->info.start_index + size - 1;
- }
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_kvdl_part_info *info,
- struct mlxsw_sp_kvdl_part *part_prev)
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl_part *part;
- bool need_update = true;
- unsigned int nr_entries;
- size_t usage_size;
- u64 resource_size;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
+ struct mlxsw_sp_kvdl *kvdl;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
- if (err) {
- need_update = false;
- resource_size = info->end_index - info->start_index + 1;
- }
-
- nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
- if (!part)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&part->info, info, sizeof(part->info));
-
- if (need_update)
- mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
- return part;
-}
-
-static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
-{
- kfree(part);
-}
-
-static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- const struct mlxsw_sp_kvdl_part_info *info;
- struct mlxsw_sp_kvdl_part *part_prev = NULL;
- int err, i;
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
+ GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ kvdl->kvdl_ops = kvdl_ops;
+ mlxsw_sp->kvdl = kvdl;
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- info = &mlxsw_sp_kvdl_parts_info[i];
- kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
- part_prev);
- if (IS_ERR(kvdl->parts[i])) {
- err = PTR_ERR(kvdl->parts[i]);
- goto err_kvdl_part_init;
- }
- part_prev = kvdl->parts[i];
- }
+ err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
+ if (err)
+ goto err_init;
return 0;
-err_kvdl_part_init:
- for (i--; i >= 0; i--)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
+err_init:
+ kfree(kvdl);
return err;
}
-static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
-}
-
-static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int nr_entries;
- int bit = -1;
- u64 occ = 0;
-
- nr_entries = (info->end_index -
- info->start_index + 1) /
- info->alloc_size;
- while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
- < nr_entries)
- occ += info->alloc_size;
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
-
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
+ kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ kfree(kvdl);
}
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- static struct devlink_resource_size_params size_params;
- u32 kvdl_max_size;
- int err;
-
- kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- return err;
+ return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
}
-int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl *kvdl;
- int err;
-
- kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
- if (!kvdl)
- return -ENOMEM;
- mlxsw_sp->kvdl = kvdl;
-
- err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
- if (err)
- goto err_kvdl_parts_init;
-
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp_kvdl_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp_kvdl_single_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp_kvdl_chunks_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp_kvdl_large_chunks_occ_get,
- mlxsw_sp);
-
- return 0;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
-err_kvdl_parts_init:
- kfree(mlxsw_sp->kvdl);
- return err;
+ kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
+ entry_count, entry_index);
}
-void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
- mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
- kfree(mlxsw_sp->kvdl);
+ return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_alloc_count);
}
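
The rewritten spectrum_kvdl.c above reduces the file to a thin dispatcher: the allocator logic moves behind an ops vtable, and each implementation's private state is carved out of the same allocation via the trailing priv[] array. A standalone sketch of that layout under standard C (kvdl_ops, kvdl_init and the linear_* callbacks are illustrative names, not the driver's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kvdl_ops {
	size_t priv_size;
	int (*init)(void *priv);
};

struct kvdl {
	const struct kvdl_ops *ops;
	unsigned long priv[];	/* priv has to be always the last item */
};

static int linear_init(void *priv)
{
	memset(priv, 0, sizeof(unsigned long));
	return 0;
}

static const struct kvdl_ops linear_ops = {
	.priv_size = sizeof(unsigned long),
	.init = linear_init,
};

/* One allocation holds both the dispatcher and the implementation state. */
static struct kvdl *kvdl_init(const struct kvdl_ops *ops)
{
	struct kvdl *kvdl = calloc(1, sizeof(*kvdl) + ops->priv_size);

	if (!kvdl)
		return NULL;
	kvdl->ops = ops;
	if (ops->init(kvdl->priv)) {
		free(kvdl);
		return NULL;
	}
	return kvdl;
}

int main(void)
{
	struct kvdl *kvdl = kvdl_init(&linear_ops);

	printf("kvdl ready: %d\n", kvdl != NULL);
	free(kvdl);
	return 0;
}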
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index a82539609d49..54275624718b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/rhashtable.h>
#include <net/ipv6.h>
@@ -1075,6 +1044,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
cancel_delayed_work_sync(&mr->stats_update_dw);
- mr->mr_ops->fini(mr->priv);
+ mr->mr_ops->fini(mlxsw_sp, mr->priv);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
index 7c864a86811d..3cde3671fe35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_MCROUTER_H
#define _MLXSW_SPECTRUM_MCROUTER_H
@@ -46,15 +15,6 @@ enum mlxsw_sp_mr_route_action {
MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
};
-enum mlxsw_sp_mr_route_prio {
- MLXSW_SP_MR_ROUTE_PRIO_SG,
- MLXSW_SP_MR_ROUTE_PRIO_STARG,
- MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
- __MLXSW_SP_MR_ROUTE_PRIO_MAX
-};
-
-#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
-
struct mlxsw_sp_mr_route_key {
int vrid;
enum mlxsw_sp_l3proto proto;
@@ -101,7 +61,7 @@ struct mlxsw_sp_mr_ops {
u16 erif_index);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv);
- void (*fini)(void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
struct mlxsw_sp_mr;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 4f4c0d311883..346f4a5fe053 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -1,41 +1,9 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "spectrum_mr_tcam.h"
#include "reg.h"
@@ -43,15 +11,8 @@
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
-struct mlxsw_sp_mr_tcam_region {
- struct mlxsw_sp *mlxsw_sp;
- enum mlxsw_reg_rtar_key_type rtar_key_type;
- struct parman *parman;
- struct parman_prio *parman_prios;
-};
-
struct mlxsw_sp_mr_tcam {
- struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+ void *priv;
};
/* This struct maps to one RIGR2 register entry */
@@ -84,8 +45,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
INIT_LIST_HEAD(&erif_list->erif_sublists);
}
-#define MLXSW_SP_KVDL_RIGR2_SIZE 1
-
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
@@ -96,8 +55,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
if (!erif_sublist)
return ERR_PTR(-ENOMEM);
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
- &erif_sublist->rigr2_kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, &erif_sublist->rigr2_kvdl_index);
if (err) {
kfree(erif_sublist);
return ERR_PTR(err);
@@ -112,7 +71,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
list_del(&erif_sublist->list);
- mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, erif_sublist->rigr2_kvdl_index);
kfree(erif_sublist);
}
@@ -221,12 +181,11 @@ struct mlxsw_sp_mr_tcam_route {
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
u32 counter_index;
- struct parman_item parman_item;
- struct parman_prio *parman_prio;
enum mlxsw_sp_mr_route_action action;
struct mlxsw_sp_mr_route_key key;
u16 irif_index;
u16 min_mtu;
+ void *priv;
};
static struct mlxsw_afa_block *
@@ -297,60 +256,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
mlxsw_afa_block_destroy(afa_block);
}
-static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
- struct parman_item *parman_item,
- struct mlxsw_sp_mr_route_key *key,
- struct mlxsw_afa_block *afa_block)
-{
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- ntohl(key->group.addr4),
- ntohl(key->group_mask.addr4),
- ntohl(key->source.addr4),
- ntohl(key->source_mask.addr4),
- mlxsw_afa_block_first_set(afa_block));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- key->group.addr6,
- key->group_mask.addr6,
- key->source.addr6,
- key->source_mask.addr6,
- mlxsw_afa_block_first_set(afa_block));
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
-static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
- struct mlxsw_sp_mr_route_key *key,
- struct parman_item *parman_item)
-{
- struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, 0, 0, 0, 0, NULL);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, zero_addr, zero_addr,
- zero_addr, zero_addr, NULL);
- break;
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
@@ -370,51 +275,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static struct mlxsw_sp_mr_tcam_region *
-mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
- enum mlxsw_sp_l3proto proto)
-{
- return &mr_tcam->tcam_regions[proto];
-}
-
-static int
-mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route,
- enum mlxsw_sp_mr_route_prio prio)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
- int err;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
- err = parman_item_add(tcam_region->parman,
- &tcam_region->parman_prios[prio],
- &route->parman_item);
- if (err)
- return err;
-
- route->parman_prio = &tcam_region->parman_prios[prio];
- return 0;
-}
-
-static void
-mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
-
- parman_item_remove(tcam_region->parman,
- route->parman_prio, &route->parman_item);
-}
-
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
int err;
@@ -448,22 +314,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_create;
}
- /* Allocate place in the TCAM */
- err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
- route_params->prio);
- if (err)
- goto err_parman_item_add;
+ route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
+ if (!route->priv) {
+ err = -ENOMEM;
+ goto err_route_priv_alloc;
+ }
/* Write the route to the TCAM */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, route->afa_block);
+ err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
+ &route->key, route->afa_block,
+ route_params->prio);
if (err)
- goto err_route_replace;
+ goto err_route_create;
return 0;
-err_route_replace:
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
-err_parman_item_add:
+err_route_create:
+ kfree(route->priv);
+err_route_priv_alloc:
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -476,12 +343,12 @@ err_counter_alloc:
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
void *priv, void *route_priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
- &route->key, &route->parman_item);
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+ ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
+ kfree(route->priv);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
@@ -502,6 +369,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
enum mlxsw_sp_mr_route_action route_action)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -516,8 +384,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -534,6 +401,7 @@ err:
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 min_mtu)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -549,8 +417,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -596,6 +463,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
@@ -630,8 +498,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -653,6 +520,7 @@ static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
@@ -677,8 +545,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -699,167 +566,36 @@ err_erif_populate:
return err;
}
-#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
-
-static int
-mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
- mr_tcam_region->rtar_key_type,
- MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void
-mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
- mr_tcam_region->rtar_key_type, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
- mr_tcam_region->rtar_key_type, new_count);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rrcr_pl[MLXSW_REG_RRCR_LEN];
-
- mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
- from_index, count,
- mr_tcam_region->rtar_key_type, to_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
-}
-
-static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_mr_tcam_region_parman_resize,
- .move = mlxsw_sp_mr_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
-static int
-mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
- enum mlxsw_reg_rtar_key_type rtar_key_type)
-{
- struct parman_prio *parman_prios;
- struct parman *parman;
- int err;
- int i;
-
- mr_tcam_region->rtar_key_type = rtar_key_type;
- mr_tcam_region->mlxsw_sp = mlxsw_sp;
-
- err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
- if (err)
- return err;
-
- parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
- mr_tcam_region);
- if (!parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
- mr_tcam_region->parman = parman;
-
- parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
- sizeof(*parman_prios), GFP_KERNEL);
- if (!parman_prios) {
- err = -ENOMEM;
- goto err_parman_prios_alloc;
- }
- mr_tcam_region->parman_prios = parman_prios;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_init(mr_tcam_region->parman,
- &mr_tcam_region->parman_prios[i], i);
- return 0;
-
-err_parman_prios_alloc:
- parman_destroy(parman);
-err_parman_create:
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
- return err;
-}
-
-static void
-mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- int i;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_fini(&mr_tcam_region->parman_prios[i]);
- kfree(mr_tcam_region->parman_prios);
- parman_destroy(mr_tcam_region->parman);
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
-}
-
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- u32 rtar_key;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
return -EIO;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV4],
- rtar_key);
- if (err)
- return err;
+ mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
+ if (!mr_tcam->priv)
+ return -ENOMEM;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV6],
- rtar_key);
+ err = ops->init(mlxsw_sp, mr_tcam->priv);
if (err)
- goto err_ipv6_region_init;
-
+ goto err_init;
return 0;
-err_ipv6_region_init:
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+err_init:
+ kfree(mr_tcam->priv);
return err;
}
-static void mlxsw_sp_mr_tcam_fini(void *priv)
+static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ ops->fini(mr_tcam->priv);
+ kfree(mr_tcam->priv);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
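
With parman gone, mlxsw_sp_mr_tcam_route_create above allocates opaque per-route state (route->priv) and unwinds failures with the usual goto ladder, each label releasing exactly what was set up before it. A minimal sketch of that unwind ordering; setup_a and setup_b are hypothetical steps standing in for the afa block creation and ops->route_create:

#include <stdio.h>
#include <stdlib.h>

static int setup_a(void) { return 0; }	/* e.g. afa block create */
static int setup_b(void *priv) { return priv ? 0 : -1; }  /* e.g. route write */

static int route_create(void)
{
	void *priv;
	int err;

	err = setup_a();
	if (err)
		return err;

	priv = malloc(16);		/* route->priv allocation */
	if (!priv) {
		err = -1;
		goto err_priv_alloc;
	}

	err = setup_b(priv);		/* write the route via the ops */
	if (err)
		goto err_route_create;

	return 0;

err_route_create:
	free(priv);
err_priv_alloc:
	/* the real code undoes setup_a here */
	return err;
}

int main(void)
{
	printf("route_create: %d\n", route_create());
	return 0;
}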
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
index f9b59ee25406..3c84151b4c33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index cad603c35271..bdf53cf350f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 77b2adb29341..3a96307f51b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1,39 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
- * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -48,6 +14,7 @@
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
+#include <linux/if_macvlan.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -60,6 +27,7 @@
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
+#include <net/switchdev.h>
#include "spectrum.h"
#include "core.h"
@@ -163,7 +131,9 @@ struct mlxsw_sp_rif_ops {
const struct mlxsw_sp_rif_params *params);
int (*configure)(struct mlxsw_sp_rif *rif);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
- struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
+ void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -342,10 +312,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
struct mlxsw_sp_prefix_usage {
@@ -1109,7 +1075,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, &tunnel_index);
if (err)
return err;
@@ -1125,7 +1092,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
- mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, fib_entry->decap.tunnel_index);
}
static struct mlxsw_sp_fib_node *
@@ -2434,17 +2402,48 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
kfree(net_work);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ __mlxsw_sp_router_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_schedule_work(struct net *net,
+ struct notifier_block *nb,
+ void (*cb)(struct work_struct *))
+{
+ struct mlxsw_sp_netevent_work *net_work;
+ struct mlxsw_sp_router *router;
+
+ if (!net_eq(net, &init_net))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ INIT_WORK(&net_work->work, cb);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ return NOTIFY_DONE;
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp_router *router;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
- struct net *net;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -2498,20 +2497,12 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
break;
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
- net = ptr;
-
- if (!net_eq(net, &init_net))
- return NOTIFY_DONE;
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_mp_hash_event_work);
- net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
- if (!net_work)
- return NOTIFY_BAD;
-
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
- INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
- net_work->mlxsw_sp = router->mlxsw_sp;
- mlxsw_core_schedule_work(&net_work->work);
- break;
+ case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_update_priority_work);
}
return NOTIFY_DONE;
@@ -3165,8 +3156,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* by the device and make sure the request can be satisfied.
*/
mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
- err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
- &alloc_size);
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ *p_adj_grp_size, &alloc_size);
if (err)
return err;
/* It is possible the allocation results in more allocated
@@ -3278,7 +3270,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* No valid allocation size available. */
goto set_trap;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
@@ -3313,7 +3306,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
old_adj_index, old_ecmp_size);
- mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ old_ecmp_size, old_adj_index);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
goto set_trap;
@@ -3335,7 +3329,8 @@ set_trap:
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
if (old_adj_index_valid)
- mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_grp->ecmp_size, nh_grp->adj_index);
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -5967,7 +5962,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct mlxsw_sp_rif *
+struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6024,6 +6019,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+	/* macvlans do not have a RIF, but rather piggyback on the
+	 * RIF of their lower device.
+ */
+ if (netif_is_macvlan(dev) && addr_list_empty)
+ return true;
+
if (rif && addr_list_empty &&
!netif_is_l3_slave(rif->dev))
return true;
@@ -6125,6 +6126,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
+{
+ return rif->fid;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6162,7 +6168,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->ops = ops;
if (ops->fid_get) {
- fid = ops->fid_get(rif);
+ fid = ops->fid_get(rif, extack);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto err_fid_get;
@@ -6267,7 +6273,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
}
/* FID was already created, just take a reference */
- fid = rif->ops->fid_get(rif);
+ fid = rif->ops->fid_get(rif, extack);
err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
if (err)
goto err_fid_port_vid_map;
@@ -6432,6 +6438,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
+{
+ u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp4, mask);
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
+{
+ u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp6, mask);
+}
+
+static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const u8 *mac, bool adding)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u8 vrrp_id = adding ? mac[5] : 0;
+ int err;
+
+ if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
+ !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
+ return 0;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
+ mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
+ else
+ mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ if (!rif) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
+ macvlan_dev->dev_addr, true);
+ if (err)
+ goto err_rif_vrrp_add;
+
+ /* Make sure the bridge driver does not have this MAC pointing at
+ * some other port.
+ */
+ if (rif->ops->fdb_del)
+ rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
+
+ return 0;
+
+err_rif_vrrp_add:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ return err;
+}
+
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ /* If we do not have a RIF, then we already took care of
+ * removing the macvlan's MAC during RIF deletion.
+ */
+ if (!rif)
+ return;
+ mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
+ false);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ break;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -6444,6 +6567,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+ else if (netif_is_macvlan(dev))
+ return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
else
return 0;
}
@@ -6684,7 +6809,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
int err = 0;
- if (!mlxsw_sp)
+ /* We do not create a RIF for a macvlan, but only use it to
+ * direct more MAC addresses to the router.
+ */
+ if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
switch (event) {
@@ -6705,6 +6833,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
+{
+ struct mlxsw_sp_rif *rif = data;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
+{
+ if (!netif_is_macvlan_port(rif->dev))
+ return 0;
+
+ netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
+ return netdev_walk_all_upper_dev_rcu(rif->dev,
+ __mlxsw_sp_rif_macvlan_flush, rif);
+}
+
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
@@ -6771,11 +6920,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
@@ -6857,6 +7008,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6865,19 +7017,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
- u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+ u16 vid;
+ int err;
+
+ if (is_vlan_dev(rif->dev)) {
+ vid = vlan_dev_vlan_id(rif->dev);
+ } else {
+ err = br_vlan_get_pvid(rif->dev, &vid);
+ if (err < 0 || !vid) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
+ return ERR_PTR(-EINVAL);
+ }
+ }
return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *br_dev;
+ struct net_device *dev;
+
+ br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+ dev = br_fdb_find_port(br_dev, mac, vid);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
@@ -6929,6 +7111,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6937,17 +7120,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.type = MLXSW_SP_RIF_TYPE_FID,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_fid_configure,
.deconfigure = mlxsw_sp_rif_fid_deconfigure,
.fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
static struct mlxsw_sp_rif_ipip_lb *
@@ -7172,6 +7371,7 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -7182,7 +7382,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
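The VRRP support added to spectrum_router.c above recognizes virtual-router MACs by masking off the last octet and comparing the rest against the well-known prefixes 00:00:5e:00:01 (IPv4) and 00:00:5e:00:02 (IPv6); the masked-out octet carries the virtual router ID, which the patch then programs into the RITR register. A standalone sketch of that masked comparison, with hypothetical helper names:

#include <stdbool.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Byte-wise masked MAC compare; mirrors ether_addr_equal_masked(). */
bool mac_equal_masked(const uint8_t *a, const uint8_t *b, const uint8_t *mask)
{
	for (int i = 0; i < ETH_ALEN; i++)
		if ((a[i] ^ b[i]) & mask[i])
			return false;
	return true;
}

/* True for 00:00:5e:00:01:XX, the VRRP IPv4 virtual-router MAC
 * range; the last octet is the VRID, so the mask ignores it.
 */
bool is_vrrp4_mac(const uint8_t *mac)
{
	static const uint8_t vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	static const uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return mac_equal_masked(mac, vrrp4, mask);
}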
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index a01edcf56797..1a60391daafa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_ROUTER_H_
#define _MLXSW_ROUTER_H_
@@ -66,6 +35,8 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -75,6 +46,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
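spectrum_router.c above also factors the duplicated NETEVENT handling into mlxsw_sp_router_schedule_work(): the notifier allocates a work item with GFP_ATOMIC (notifier chains may run in atomic context), stashes the mlxsw_sp pointer, and defers the real processing to a workqueue. A compilable userspace approximation of that capture-and-defer shape, with illustrative names and a plain array standing in for the workqueue:

#include <stdio.h>
#include <stdlib.h>

struct net_work {
	void (*func)(struct net_work *w);
	int event;			/* context captured at notification time */
};

#define MAX_PENDING 16
static struct net_work *pending[MAX_PENDING];
static int n_pending;

/* Notifier side: capture context and queue, never process inline,
 * since the (conceptual) caller may be atomic.
 */
static int schedule_net_work(int event, void (*cb)(struct net_work *))
{
	struct net_work *w;

	if (n_pending == MAX_PENDING)
		return -1;
	w = calloc(1, sizeof(*w));
	if (!w)
		return -1;
	w->func = cb;
	w->event = event;
	pending[n_pending++] = w;
	return 0;
}

static void update_priority_work(struct net_work *w)
{
	printf("reapplying router config for event %d\n", w->event);
	free(w);			/* the worker owns and frees the item */
}

int main(void)
{
	schedule_net_work(42, update_priority_work);
	for (int i = 0; i < n_pending; i++)	/* the "worker thread" */
		pending[i]->func(pending[i]);
	return 0;
}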
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3d187d88cc7c..d965fd275c90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1,41 +1,11 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.c
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
+#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>
@@ -254,7 +224,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, dev, iter)
- if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+ if (netif_carrier_ok(dev) &&
+ net_lag_port_dev_txable(dev) &&
+ mlxsw_sp_port_dev_check(dev))
return dev;
return NULL;
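The spectrum_span.c hunk above tightens mirror-port selection on a LAG: IFF_UP alone is not enough, because an administratively-up slave may lack carrier or be excluded from transmission by the bonding/team driver, which is what the new net_lag_port_dev_txable() check reports. A small sketch of the same first-usable-lower-device walk over a plain array (struct port here is hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct port {
	bool carrier_ok;	/* physical link is up */
	bool lag_txable;	/* bond/team will actually transmit here */
	bool is_switch_port;	/* belongs to the mirroring ASIC */
};

/* First LAG member that can really carry mirrored traffic, or NULL. */
struct port *pick_span_port(struct port *ports, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (ports[i].carrier_ok && ports[i].lag_txable &&
		    ports[i].is_switch_port)
			return &ports[i];
	return NULL;
}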
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 14a6de904db1..5e04252f2a11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -1,35 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.h
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_SPAN_H
#define _MLXSW_SPECTRUM_SPAN_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index eea5666a86b2..0d8444aaba01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1135,6 +1102,39 @@ err_port_vlan_set:
return err;
}
+static int
+mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ u16 pvid;
+ u16 vid;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+ if (!rif)
+ return 0;
+ fid = mlxsw_sp_rif_fid(rif);
+ pvid = mlxsw_sp_fid_8021q_vid(fid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
@@ -1146,8 +1146,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- if (netif_is_bridge_master(orig_dev))
- return -EOPNOTSUPP;
+ if (netif_is_bridge_master(orig_dev)) {
+ int err = 0;
+
+ if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
+ br_vlan_enabled(orig_dev) &&
+ switchdev_trans_ph_prepare(trans))
+ err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
+ orig_dev, vlan);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
if (switchdev_trans_ph_prepare(trans))
return 0;
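The switchdev change above replaces a blanket -EOPNOTSUPP for bridge-master VLAN objects with a prepare-phase veto: if the operation would change or remove a PVID that a router interface depends on, the transaction fails early with -EBUSY; otherwise the object still falls through to -EOPNOTSUPP. A compact sketch of that prepare/commit shape, with illustrative parameter names:

#include <errno.h>
#include <stdbool.h>

enum trans_phase { PH_PREPARE, PH_COMMIT };

/* The prepare phase may veto with -EBUSY before any state changes;
 * everything else on a bridge master stays unoffloaded, so the
 * function still ends in -EOPNOTSUPP, just like the patch.
 */
int bridge_master_vlan_add(enum trans_phase ph, bool touches_pvid,
			   bool pvid_used_by_rif)
{
	if (ph == PH_PREPARE && touches_pvid && pvid_used_by_rif)
		return -EBUSY;
	return -EOPNOTSUPP;
}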
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
index bc44d5effc28..c218e10bd835 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -1,35 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index c698ec4fd9d4..bcf2e79a21c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/switchib.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3922c1cfe5f5..2d4f213e154d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 399e9d6993f7..53020724c2f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -1,38 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/trap.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_TRAP_H
#define _MLXSW_TRAP_H
@@ -63,6 +31,7 @@ enum {
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_IPV4_VRRP = 0x59,
MLXSW_TRAP_ID_RPF = 0x5C,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
@@ -78,6 +47,7 @@ enum {
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
+ MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index fdf94720ca62..da51dd9d5e44 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/txheader.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index b72d1bd11296..ebbdfb908745 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port)
*/
static void port_set_link_speed(struct ksz_port *port)
{
- struct ksz_port_info *info;
struct ksz_hw *hw = port->hw;
u16 data;
u16 cfg;
@@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port)
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
- info = &hw->port_info[p];
-
port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 71dca8bd51ac..16bd3f44dbe8 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,6 +46,7 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
select PHYLIB
+ select CRC16
---help---
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 2e982cc249fb..538926d2b43f 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o
obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
obj-$(CONFIG_LAN743X) += lan743x.o
-lan743x-objs := lan743x_main.o
+lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
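The new lan743x_ethtool.c below leans on one idiom throughout its EEPROM and OTP helpers: read a status register, give up once a deadline passes, and sleep briefly between samples. A userspace approximation of that bounded-poll loop, with a hypothetical ready() callback standing in for the register read:

#include <stdbool.h>
#include <time.h>

/* Bounded poll: sample ready() every ~50us until it succeeds or
 * roughly one second has passed; models the jiffies/time_after()
 * loops used throughout the driver below.
 */
bool poll_until_ready(bool (*ready)(void))
{
	struct timespec start, now, nap = { .tv_nsec = 50 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (ready())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec > 1)
			return false;	/* deadline passed */
		nanosleep(&nap, NULL);
	}
}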
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
new file mode 100644
index 000000000000..07c1eb63415a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
+#include <linux/net_tstamp.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+/* eeprom */
+#define LAN743X_EEPROM_MAGIC (0x74A5)
+#define LAN743X_OTP_MAGIC (0x74F3)
+#define EEPROM_INDICATOR_1 (0xA5)
+#define EEPROM_INDICATOR_2 (0xAA)
+#define EEPROM_MAC_OFFSET (0x01)
+#define MAX_EEPROM_SIZE (512)
+#define OTP_INDICATOR_1 (0xF3)
+#define OTP_INDICATOR_2 (0xF7)
+
+static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ unsigned long timeout;
+ u32 buf;
+ int i;
+
+ buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+
+ if (buf & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ lan743x_csr_write(adapter, OTP_PWR_DN, 0);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "timeout on OTP_PWR_DN completion\n");
+ return -EIO;
+ }
+ } while (buf & OTP_PWR_DN_PWRDN_N_);
+ }
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ for (i = 0; i < length; i++) {
+ lan743x_csr_write(adapter, OTP_ADDR1,
+ ((offset + i) >> 8) &
+ OTP_ADDR1_15_11_MASK_);
+ lan743x_csr_write(adapter, OTP_ADDR2,
+ ((offset + i) &
+ OTP_ADDR2_10_3_MASK_));
+ lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ buf = lan743x_csr_read(adapter, OTP_STATUS);
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Timeout on OTP_STATUS completion\n");
+ return -EIO;
+ }
+ } while (buf & OTP_STATUS_BUSY_);
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_) ||
+ (val & E2P_CMD_EPC_TIMEOUT_))
+ break;
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "EEPROM read operation timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_))
+ return 0;
+
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n");
+ return -EIO;
+}
+
+static int lan743x_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ val = lan743x_csr_read(adapter, E2P_DATA);
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, E2P_DATA, val);
+
+ /* Send "write" command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info,
+ pci_name(adapter->pdev), sizeof(info->bus_info));
+}
+
+static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void lan743x_ethtool_set_msglevel(struct net_device *netdev,
+ u32 msglevel)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglevel;
+}
+
+static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+ return MAX_EEPROM_SIZE;
+}
+
+static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+}
+
+static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = -EINVAL;
+
+ if (ee->magic == LAN743X_EEPROM_MAGIC)
+ ret = lan743x_eeprom_write(adapter, ee->offset, ee->len,
+ data);
+	/* Beware! OTP is One-Time Programmable ONLY!
+	 * So perform strict sanity checks here before writing anything
+	 */
+ else if ((ee->magic == LAN743X_OTP_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == MAX_EEPROM_SIZE) &&
+ (data[0] == OTP_INDICATOR_1))
+ ret = lan743x_otp_write(adapter, ee->offset, ee->len, data);
+
+ return ret;
+}
+
+static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX FCS Errors",
+ "RX Alignment Errors",
+ "Rx Fragment Errors",
+ "RX Jabber Errors",
+ "RX Undersize Frame Errors",
+ "RX Oversize Frame Errors",
+ "RX Dropped Frames",
+ "RX Unicast Byte Count",
+ "RX Broadcast Byte Count",
+ "RX Multicast Byte Count",
+ "RX Unicast Frames",
+ "RX Broadcast Frames",
+ "RX Multicast Frames",
+ "RX Pause Frames",
+ "RX 64 Byte Frames",
+ "RX 65 - 127 Byte Frames",
+ "RX 128 - 255 Byte Frames",
+ "RX 256 - 511 Bytes Frames",
+ "RX 512 - 1023 Byte Frames",
+ "RX 1024 - 1518 Byte Frames",
+ "RX Greater 1518 Byte Frames",
+};
+
+static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Queue 0 Frames",
+ "RX Queue 1 Frames",
+ "RX Queue 2 Frames",
+ "RX Queue 3 Frames",
+};
+
+static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Total Frames",
+ "EEE RX LPI Transitions",
+ "EEE RX LPI Time",
+ "RX Counter Rollover Status",
+ "TX FCS Errors",
+ "TX Excess Deferral Errors",
+ "TX Carrier Errors",
+ "TX Bad Byte Count",
+ "TX Single Collisions",
+ "TX Multiple Collisions",
+ "TX Excessive Collision",
+ "TX Late Collisions",
+ "TX Unicast Byte Count",
+ "TX Broadcast Byte Count",
+ "TX Multicast Byte Count",
+ "TX Unicast Frames",
+ "TX Broadcast Frames",
+ "TX Multicast Frames",
+ "TX Pause Frames",
+ "TX 64 Byte Frames",
+ "TX 65 - 127 Byte Frames",
+ "TX 128 - 255 Byte Frames",
+ "TX 256 - 511 Bytes Frames",
+ "TX 512 - 1023 Byte Frames",
+ "TX 1024 - 1518 Byte Frames",
+ "TX Greater 1518 Byte Frames",
+ "TX Total Frames",
+ "EEE TX LPI Transitions",
+ "EEE TX LPI Time",
+ "TX Counter Rollover Status",
+};
+
+static const u32 lan743x_set0_hw_cnt_addr[] = {
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_RX_FRAGMENT_ERRORS,
+ STAT_RX_JABBER_ERRORS,
+ STAT_RX_UNDERSIZE_FRAME_ERRORS,
+ STAT_RX_OVERSIZE_FRAME_ERRORS,
+ STAT_RX_DROPPED_FRAMES,
+ STAT_RX_UNICAST_BYTE_COUNT,
+ STAT_RX_BROADCAST_BYTE_COUNT,
+ STAT_RX_MULTICAST_BYTE_COUNT,
+ STAT_RX_UNICAST_FRAMES,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTES_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_1518_BYTE_FRAMES,
+ STAT_RX_GREATER_1518_BYTE_FRAMES,
+};
+
+static const u32 lan743x_set2_hw_cnt_addr[] = {
+ STAT_RX_TOTAL_FRAMES,
+ STAT_EEE_RX_LPI_TRANSITIONS,
+ STAT_EEE_RX_LPI_TIME,
+ STAT_RX_COUNTER_ROLLOVER_STATUS,
+ STAT_TX_FCS_ERRORS,
+ STAT_TX_EXCESS_DEFERRAL_ERRORS,
+ STAT_TX_CARRIER_ERRORS,
+ STAT_TX_BAD_BYTE_COUNT,
+ STAT_TX_SINGLE_COLLISIONS,
+ STAT_TX_MULTIPLE_COLLISIONS,
+ STAT_TX_EXCESSIVE_COLLISION,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_UNICAST_BYTE_COUNT,
+ STAT_TX_BROADCAST_BYTE_COUNT,
+ STAT_TX_MULTICAST_BYTE_COUNT,
+ STAT_TX_UNICAST_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTES_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_1518_BYTE_FRAMES,
+ STAT_TX_GREATER_1518_BYTE_FRAMES,
+ STAT_TX_TOTAL_FRAMES,
+ STAT_EEE_TX_LPI_TRANSITIONS,
+ STAT_EEE_TX_LPI_TIME,
+ STAT_TX_COUNTER_ROLLOVER_STATUS
+};
+
+static void lan743x_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, lan743x_set0_hw_cnt_strings,
+ sizeof(lan743x_set0_hw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)],
+ lan743x_set1_sw_cnt_strings,
+ sizeof(lan743x_set1_sw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings)],
+ lan743x_set2_hw_cnt_strings,
+ sizeof(lan743x_set2_hw_cnt_strings));
+ break;
+ }
+}
+
+static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int data_index = 0;
+ u32 buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+ for (i = 0; i < ARRAY_SIZE(adapter->rx); i++)
+ data[data_index++] = (u64)(adapter->rx[i].frame_count);
+ for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+}
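
lan743x_ethtool_get_strings and lan743x_ethtool_get_ethtool_stats walk the name tables and the CSR-address tables in parallel, so each pair must stay the same length. A compile-time guard along these lines (a sketch, not part of this patch) would catch a mismatch:

    /* illustrative only: keep stat-name and CSR-address tables in lockstep */
    static inline void lan743x_stats_tables_sanity(void)
    {
    	BUILD_BUG_ON(ARRAY_SIZE(lan743x_set0_hw_cnt_strings) !=
    		     ARRAY_SIZE(lan743x_set0_hw_cnt_addr));
    	BUILD_BUG_ON(ARRAY_SIZE(lan743x_set2_hw_cnt_strings) !=
    		     ARRAY_SIZE(lan743x_set2_hw_cnt_addr));
    }
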
+
+static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ {
+ int ret;
+
+ ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ return ret;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ rxnfc->data = 0;
+ switch (rxnfc->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = LAN743X_USED_RX_CHANNELS;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev)
+{
+ return 40; /* RFE hash key: 10 dwords = 40 bytes */
+}
+
+static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return 128; /* RSS indirection table: 32 dwords = 128 entries */
+}
+
+static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (indir) {
+ int dw_index;
+ int byte_index = 0;
+
+ for (dw_index = 0; dw_index < 32; dw_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter, RFE_INDX(dw_index));
+
+ byte_index = dw_index << 2;
+ indir[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ indir[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ indir[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ indir[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (key) {
+ int dword_index;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter,
+ RFE_HASH_KEY(dword_index));
+
+ byte_index = dword_index << 2;
+ key[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ key[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ key[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ key[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (hfunc)
+ (*hfunc) = ETH_RSS_HASH_TOP;
+ return 0;
+}
+
+static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ u32 indir_value = 0;
+ int dword_index = 0;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 32; dword_index++) {
+ byte_index = dword_index << 2;
+ indir_value =
+ (((indir[byte_index + 0] & 0x000000FF) << 0) |
+ ((indir[byte_index + 1] & 0x000000FF) << 8) |
+ ((indir[byte_index + 2] & 0x000000FF) << 16) |
+ ((indir[byte_index + 3] & 0x000000FF) << 24));
+ lan743x_csr_write(adapter, RFE_INDX(dword_index),
+ indir_value);
+ }
+ }
+ if (key) {
+ int dword_index = 0;
+ int byte_index = 0;
+ u32 key_value = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ byte_index = dword_index << 2;
+ key_value =
+ ((((u32)(key[byte_index + 0])) << 0) |
+ (((u32)(key[byte_index + 1])) << 8) |
+ (((u32)(key[byte_index + 2])) << 16) |
+ (((u32)(key[byte_index + 3])) << 24));
+ lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index),
+ key_value);
+ }
+ }
+ return 0;
+}
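
Both the get and set paths above pack four 8-bit table entries per 32-bit CSR, entry 0 in the least significant byte. The open-coded shifting is equivalent to this pair of helpers (illustrative only, not part of this patch):

    static inline u32 lan743x_rss_pack4(const u8 *e)
    {
    	/* entry 0 lands in the least significant byte */
    	return ((u32)e[0]) | ((u32)e[1] << 8) |
    	       ((u32)e[2] << 16) | ((u32)e[3] << 24);
    }

    static inline void lan743x_rss_unpack4(u32 v, u8 *e)
    {
    	e[0] = v & 0xFF;
    	e[1] = (v >> 8) & 0xFF;
    	e[2] = (v >> 16) & 0xFF;
    	e[3] = (v >> 24) & 0xFF;
    }
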
+
+static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp.ptp_clock)
+ ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
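
The tx_types and rx_filters advertised here are what user space may request through SIOCSHWTSTAMP, which this patch routes to lan743x_ptp_ioctl in lan743x_main.c. A hypothetical user-space sketch (sock is an ordinary AF_INET datagram socket):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    static int enable_hw_timestamps(int sock, const char *ifname)
    {
    	struct hwtstamp_config cfg = {
    		.tx_type = HWTSTAMP_TX_ON,	  /* advertised in tx_types */
    		.rx_filter = HWTSTAMP_FILTER_ALL, /* advertised in rx_filters */
    	};
    	struct ifreq ifr = { 0 };

    	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    	ifr.ifr_data = (char *)&cfg;
    	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }
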
+
+static int lan743x_ethtool_get_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 buf;
+ int ret;
+
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ ret = phy_ethtool_get_eee(phydev, eee);
+ if (ret < 0)
+ return ret;
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ if (buf & MAC_CR_EEE_EN_) {
+ eee->eee_enabled = true;
+ eee->eee_active = !!(eee->advertised & eee->lp_advertised);
+ eee->tx_lpi_enabled = true;
+ /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same microsecond unit */
+ buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ eee->tx_lpi_timer = buf;
+ } else {
+ eee->eee_enabled = false;
+ eee->eee_active = false;
+ eee->tx_lpi_enabled = false;
+ eee->tx_lpi_timer = 0;
+ }
+
+ return 0;
+}
+
+static int lan743x_ethtool_set_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 buf = 0;
+ int ret = 0;
+
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ if (eee->eee_enabled) {
+ ret = phy_init_eee(phydev, 0);
+ if (ret) {
+ netif_err(adapter, drv, adapter->netdev,
+ "EEE initialization failed\n");
+ return ret;
+ }
+
+ buf = (u32)eee->tx_lpi_timer;
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf |= MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ } else {
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ }
+
+ return phy_ethtool_set_eee(phydev, eee);
+}
+
+#ifdef CONFIG_PM
+static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+ phy_ethtool_get_wol(netdev->phydev, wol);
+
+ wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+ WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+
+ wol->wolopts |= adapter->wolopts;
+}
+
+static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->wolopts = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wolopts |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wolopts |= WAKE_MCAST;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wolopts |= WAKE_BCAST;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wolopts |= WAKE_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wolopts |= WAKE_PHY;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wolopts |= WAKE_ARP;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+
+ phy_ethtool_set_wol(netdev->phydev, wol);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+const struct ethtool_ops lan743x_ethtool_ops = {
+ .get_drvinfo = lan743x_ethtool_get_drvinfo,
+ .get_msglevel = lan743x_ethtool_get_msglevel,
+ .set_msglevel = lan743x_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+
+ .get_eeprom_len = lan743x_ethtool_get_eeprom_len,
+ .get_eeprom = lan743x_ethtool_get_eeprom,
+ .set_eeprom = lan743x_ethtool_set_eeprom,
+ .get_strings = lan743x_ethtool_get_strings,
+ .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats,
+ .get_sset_count = lan743x_ethtool_get_sset_count,
+ .get_rxnfc = lan743x_ethtool_get_rxnfc,
+ .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
+ .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
+ .get_rxfh = lan743x_ethtool_get_rxfh,
+ .set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_ts_info = lan743x_ethtool_get_ts_info,
+ .get_eee = lan743x_ethtool_get_eee,
+ .set_eee = lan743x_ethtool_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM
+ .get_wol = lan743x_ethtool_get_wol,
+ .set_wol = lan743x_ethtool_set_wol,
+#endif
+};
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
new file mode 100644
index 000000000000..d0d11a777a58
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_ETHTOOL_H
+#define _LAN743X_ETHTOOL_H
+
+#include "linux/ethtool.h"
+
+extern const struct ethtool_ops lan743x_ethtool_ops;
+
+#endif /* _LAN743X_ETHTOOL_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dd947e4dd3ce..e7dce79ff2c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -11,7 +11,9 @@
#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
+#include <linux/crc16.h>
#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
@@ -53,13 +55,13 @@ return_error:
return ret;
}
-static u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
{
return ioread32(&adapter->csr.csr_address[offset]);
}
-static void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
- u32 data)
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
+ u32 data)
{
iowrite32(data, &adapter->csr.csr_address[offset]);
}
@@ -265,6 +267,10 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
lan743x_intr_software_isr(adapter);
int_sts &= ~INT_BIT_SW_GP_;
}
+ if (int_sts & INT_BIT_1588_) {
+ lan743x_ptp_isr(adapter);
+ int_sts &= ~INT_BIT_1588_;
+ }
}
if (int_sts)
lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
@@ -828,7 +834,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
}
if (!mac_address_valid)
- random_ether_addr(adapter->mac_address);
+ eth_random_addr(adapter->mac_address);
lan743x_mac_set_address(adapter, adapter->mac_address);
ether_addr_copy(netdev->dev_addr, adapter->mac_address);
return 0;
@@ -974,6 +980,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
ksettings.base.duplex,
local_advertisement,
remote_advertisement);
+ lan743x_ptp_update_latency(adapter, ksettings.base.speed);
}
}
@@ -1023,6 +1030,24 @@ return_error:
return ret;
}
+static void lan743x_rfe_open(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, RFE_RSS_CFG,
+ RFE_RSS_CFG_UDP_IPV6_EX_ |
+ RFE_RSS_CFG_TCP_IPV6_EX_ |
+ RFE_RSS_CFG_IPV6_EX_ |
+ RFE_RSS_CFG_UDP_IPV6_ |
+ RFE_RSS_CFG_TCP_IPV6_ |
+ RFE_RSS_CFG_IPV6_ |
+ RFE_RSS_CFG_UDP_IPV4_ |
+ RFE_RSS_CFG_TCP_IPV4_ |
+ RFE_RSS_CFG_IPV4_ |
+ RFE_RSS_CFG_VALID_HASH_BITS_ |
+ RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
+ RFE_RSS_CFG_RSS_HASH_STORE_ |
+ RFE_RSS_CFG_RSS_ENABLE_);
+}
+
static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
{
u8 *mac_addr;
@@ -1206,6 +1231,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
struct lan743x_tx_buffer_info *buffer_info = NULL;
struct lan743x_tx_descriptor *descriptor = NULL;
u32 descriptor_type = 0;
+ bool ignore_sync;
descriptor = &tx->ring_cpu_ptr[descriptor_index];
buffer_info = &tx->buffer_info[descriptor_index];
@@ -1236,11 +1262,27 @@ clean_up_data_descriptor:
buffer_info->dma_ptr = 0;
buffer_info->buffer_length = 0;
}
- if (buffer_info->skb) {
+ if (!buffer_info->skb)
+ goto clear_active;
+
+ if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
+ goto clear_skb;
}
+ if (cleanup) {
+ lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
+ dev_kfree_skb(buffer_info->skb);
+ } else {
+ ignore_sync = (buffer_info->flags &
+ TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
+ lan743x_ptp_tx_timestamp_skb(tx->adapter,
+ buffer_info->skb, ignore_sync);
+ }
+
+clear_skb:
+ buffer_info->skb = NULL;
+
clear_active:
buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
@@ -1301,10 +1343,25 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
return last_head - last_tail - 1;
}
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync)
+{
+ if (enable_timestamping)
+ tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ if (enable_onestep_sync)
+ tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
+}
+
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
unsigned char *first_buffer,
unsigned int first_buffer_length,
unsigned int frame_length,
+ bool time_stamp,
bool check_sum)
{
/* called only from within lan743x_tx_xmit_frame.
@@ -1342,6 +1399,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_ |
TX_DESC_DATA0_FS_ |
TX_DESC_DATA0_FCS_;
+ if (time_stamp)
+ tx->frame_data0 |= TX_DESC_DATA0_TSE_;
if (check_sum)
tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
@@ -1455,6 +1514,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
struct sk_buff *skb,
+ bool time_stamp,
bool ignore_sync)
{
/* called only from within lan743x_tx_xmit_frame
@@ -1472,6 +1532,8 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
@@ -1500,6 +1562,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
unsigned int frame_length = 0;
unsigned int head_length = 0;
unsigned long irq_flags = 0;
+ bool do_timestamp = false;
bool ignore_sync = false;
int nr_frags = 0;
bool gso = false;
@@ -1521,6 +1584,14 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
/* space available, transmit skb */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
+ (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_timestamp = true;
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ ignore_sync = true;
+ }
head_length = skb_headlen(skb);
frame_length = skb_pagelen(skb);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1534,6 +1605,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (lan743x_tx_frame_start(tx,
skb->data, head_length,
start_frame_length,
+ do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
dev_kfree_skb(skb);
goto unlock;
@@ -1561,7 +1633,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
finish:
- lan743x_tx_frame_end(tx, skb, ignore_sync);
+ lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
unlock:
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
@@ -2390,6 +2462,8 @@ static int lan743x_netdev_close(struct net_device *netdev)
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
+ lan743x_ptp_close(adapter);
+
lan743x_phy_close(adapter);
lan743x_mac_close(adapter);
@@ -2417,6 +2491,12 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_mac;
+ ret = lan743x_ptp_open(adapter);
+ if (ret)
+ goto close_phy;
+
+ lan743x_rfe_open(adapter);
+
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
ret = lan743x_rx_open(&adapter->rx[index]);
if (ret)
@@ -2434,6 +2514,9 @@ close_rx:
if (adapter->rx[index].ring_cpu_ptr)
lan743x_rx_close(&adapter->rx[index]);
}
+ lan743x_ptp_close(adapter);
+
+close_phy:
lan743x_phy_close(adapter);
close_mac:
@@ -2461,6 +2544,8 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
{
if (!netif_running(netdev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
@@ -2585,6 +2670,11 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
mutex_init(&adapter->dp_lock);
+
+ ret = lan743x_gpio_init(adapter);
+ if (ret)
+ return ret;
+
ret = lan743x_mac_init(adapter);
if (ret)
return ret;
@@ -2593,6 +2683,10 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
+ ret = lan743x_ptp_init(adapter);
+ if (ret)
+ return ret;
+
lan743x_rfe_update_mac_address(adapter);
ret = lan743x_dmac_init(adapter);
@@ -2689,6 +2783,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
goto cleanup_hardware;
adapter->netdev->netdev_ops = &lan743x_netdev_ops;
+ adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
adapter->netdev->hw_features = adapter->netdev->features;
@@ -2747,10 +2842,182 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
lan743x_netdev_close(netdev);
rtnl_unlock();
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
/* clean up lan743x portion */
lan743x_hardware_cleanup(adapter);
}
+#ifdef CONFIG_PM
+static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
+{
+ return bitrev16(crc16(0xFFFF, buf, len));
+}
+
+static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+{
+ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+ const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+ const u8 arp_type[2] = { 0x08, 0x06 };
+ int mask_index;
+ u32 pmtctl;
+ u32 wucsr;
+ u32 macrx;
+ u16 crc;
+
+ for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
+
+ /* clear wake settings */
+ pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+ pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+
+ macrx = lan743x_csr_read(adapter, MAC_RX);
+
+ wucsr = 0;
+ mask_index = 0;
+
+ pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+
+ if (adapter->wolopts & WAKE_PHY) {
+ pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+ }
+ if (adapter->wolopts & WAKE_MAGIC) {
+ wucsr |= MAC_WUCSR_MPEN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_UCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_BCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_MCAST) {
+ /* IPv4 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ /* IPv6 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_ARP) {
+ /* set MAC_WUF_CFG & WUF_MASK
+ * for packettype (offset 12,13) = ARP (0x0806)
+ */
+ crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+
+ lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
+ lan743x_csr_write(adapter, PMT_CTL, pmtctl);
+ lan743x_csr_write(adapter, MAC_RX, macrx);
+}
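
Each wake-up frame filter matches a CRC16 computed over the frame bytes selected by its mask registers; for the ARP filter above, MAC_WUF_MASK0 = 0x3000 selects frame offsets 12 and 13, i.e. the EtherType. A worked example of the value the hardware compares against (illustrative only; assumes <linux/crc16.h> and <linux/bitrev.h> are available):

    static u16 example_arp_wakeframe_crc(void)
    {
    	/* the two EtherType bytes of an ARP frame, offsets 12-13 */
    	static const u8 arp_type[2] = { 0x08, 0x06 };

    	return bitrev16(crc16(0xFFFF, arp_type, 2));
    }
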
+
+static int lan743x_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_pcidev_shutdown(pdev);
+
+ /* clear all wakes */
+ lan743x_csr_write(adapter, MAC_WUCSR, 0);
+ lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+
+ if (adapter->wolopts)
+ lan743x_pm_set_wol(adapter);
+
+ /* Host sets PME_En, puts device into D3hot */
+ pci_prepare_to_sleep(pdev);
+
+ return 0;
+}
+
+static int lan743x_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ ret = lan743x_hardware_init(adapter, pdev);
+ if (ret) {
+ netif_err(adapter, probe, adapter->netdev,
+ "lan743x_hardware_init returned %d\n", ret);
+ }
+
+ /* Reopen the netdev only if it was running when the system was
+ * suspended; this is the case after e.g. pm-suspend, but not when
+ * the interface was down before suspend
+ */
+ if (netif_running(netdev))
+ lan743x_netdev_open(netdev);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops lan743x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
+};
+#endif /* CONFIG_PM */
+
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ 0, }
@@ -2761,6 +3028,9 @@ static struct pci_driver lan743x_pcidev_driver = {
.id_table = lan743x_pcidev_tbl,
.probe = lan743x_pcidev_probe,
.remove = lan743x_pcidev_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &lan743x_pm_ops,
+#endif
.shutdown = lan743x_pcidev_shutdown,
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 73b463a9df61..0e82b6368798 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -4,12 +4,17 @@
#ifndef _LAN743X_H
#define _LAN743X_H
+#include "lan743x_ptp.h"
+
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver"
#define DRIVER_NAME "lan743x"
/* Register Definitions */
#define ID_REV (0x00)
+#define ID_REV_ID_MASK_ (0xFFFF0000)
+#define ID_REV_ID_LAN7430_ (0x74300000)
+#define ID_REV_ID_LAN7431_ (0x74310000)
#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
(((id_rev) & 0xFFF00000) == 0x74300000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
@@ -24,8 +29,18 @@
#define HW_CFG_LRST_ BIT(1)
#define PMT_CTL (0x014)
+#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27)
+#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25)
+#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24)
+#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23)
+#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
+#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
+#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
+#define PMT_CTL_WOL_EN_ BIT(3)
+#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2)
+#define PMT_CTL_WUPS_MASK_ (0x00000003)
#define DP_SEL (0x024)
#define DP_SEL_DPRDY_ BIT(31)
@@ -42,6 +57,31 @@
#define DP_DATA_0 (0x030)
+#define E2P_CMD (0x040)
+#define E2P_CMD_EPC_BUSY_ BIT(31)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_ BIT(10)
+#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF)
+
+#define E2P_DATA (0x044)
+
+#define GPIO_CFG0 (0x050)
+#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG1 (0x054)
+#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG2 (0x058)
+#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG3 (0x05C)
+#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit))
+
#define FCT_RX_CTL (0xAC)
#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel))
#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel))
@@ -62,6 +102,7 @@
((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_)
#define MAC_CR (0x100)
+#define MAC_CR_EEE_EN_ BIT(17)
#define MAC_CR_ADD_ BIT(12)
#define MAC_CR_ASD_ BIT(11)
#define MAC_CR_CNTR_RST_ BIT(5)
@@ -97,6 +138,40 @@
#define MAC_MII_DATA (0x124)
+#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
+
+#define MAC_WUCSR (0x140)
+#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_PFDA_EN_ BIT(3)
+#define MAC_WUCSR_WAKE_EN_ BIT(2)
+#define MAC_WUCSR_MPEN_ BIT(1)
+#define MAC_WUCSR_BCST_EN_ BIT(0)
+
+#define MAC_WK_SRC (0x144)
+
+#define MAC_WUF_CFG0 (0x150)
+#define MAC_NUM_OF_WUF_CFG (32)
+#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0)
+#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index)))
+#define MAC_WUF_CFG_EN_ BIT(31)
+#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000)
+#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000)
+#define MAC_WUF_CFG_OFFSET_SHIFT_ (16)
+#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF)
+
+#define MAC_WUF_MASK0_0 (0x200)
+#define MAC_WUF_MASK0_1 (0x204)
+#define MAC_WUF_MASK0_2 (0x208)
+#define MAC_WUF_MASK0_3 (0x20C)
+#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0)
+#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1)
+#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2)
+#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3)
+#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK3(index) (MAC_WUF_MASK3_BEGIN + (0x10 * (index)))
+
/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */
#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x)))
#define RFE_ADDR_FILT_HI_VALID_ BIT(31)
@@ -111,13 +186,35 @@
#define RFE_CTL_MCAST_HASH_ BIT(3)
#define RFE_CTL_DA_PERFECT_ BIT(1)
+#define RFE_RSS_CFG (0x554)
+#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16)
+#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15)
+#define RFE_RSS_CFG_IPV6_EX_ BIT(14)
+#define RFE_RSS_CFG_UDP_IPV6_ BIT(13)
+#define RFE_RSS_CFG_TCP_IPV6_ BIT(12)
+#define RFE_RSS_CFG_IPV6_ BIT(11)
+#define RFE_RSS_CFG_UDP_IPV4_ BIT(10)
+#define RFE_RSS_CFG_TCP_IPV4_ BIT(9)
+#define RFE_RSS_CFG_IPV4_ BIT(8)
+#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0)
+#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2)
+#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1)
+#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0)
+
+#define RFE_HASH_KEY(index) (0x558 + (index << 2))
+
+#define RFE_INDX(index) (0x580 + (index << 2))
+
+#define MAC_WUCSR2 (0x600)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel))
#define INT_BIT_ALL_TX_ (0x000F0000)
#define INT_BIT_SW_GP_ BIT(9)
-#define INT_BIT_ALL_OTHER_ (0x00000280)
+#define INT_BIT_1588_ BIT(7)
+#define INT_BIT_ALL_OTHER_ (INT_BIT_SW_GP_ | INT_BIT_1588_)
#define INT_BIT_MAS_ BIT(0)
#define INT_SET (0x784)
@@ -158,6 +255,71 @@
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
+#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2)
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_RESET_ BIT(0)
+#define PTP_GENERAL_CONFIG (0x0A04)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0x7 << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0x7) << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+
+#define PTP_INT_STS (0x0A08)
+#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
+#define PTP_INT_BIT_TX_TS_ BIT(12)
+#define PTP_INT_BIT_TIMER_B_ BIT(1)
+#define PTP_INT_BIT_TIMER_A_ BIT(0)
+
+#define PTP_CLOCK_SEC (0x0A14)
+#define PTP_CLOCK_NS (0x0A18)
+#define PTP_CLOCK_SUBNS (0x0A1C)
+#define PTP_CLOCK_RATE_ADJ (0x0A20)
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ (0x0A2C)
+#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF)
+#define PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LATENCY (0x0A5C)
+#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
+#define PTP_LATENCY_RX_SET_(rx_latency) \
+ (((u32)(rx_latency)) & 0x0000FFFF)
+#define PTP_CAP_INFO (0x0A60)
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4)
+
+#define PTP_TX_MOD (0x0AA4)
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000)
+
+#define PTP_TX_MOD2 (0x0AA8)
+#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_ (0x00000001)
+
+#define PTP_TX_EGRESS_SEC (0x0AAC)
+#define PTP_TX_EGRESS_NS (0x0AB0)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_ (0xC0000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_ (0x00000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_ (0x40000000)
+#define PTP_TX_EGRESS_NS_TS_NS_MASK_ (0x3FFFFFFF)
+
+#define PTP_TX_MSG_HEADER (0x0AB4)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+
#define DMAC_CFG (0xC00)
#define DMAC_CFG_COAL_EN_ BIT(16)
#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
@@ -288,9 +450,33 @@
#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3)
#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007)
+#define OTP_PWR_DN (0x1000)
+#define OTP_PWR_DN_PWRDN_N_ BIT(0)
+
+#define OTP_ADDR1 (0x1004)
+#define OTP_ADDR1_15_11_MASK_ (0x1F)
+
+#define OTP_ADDR2 (0x1008)
+#define OTP_ADDR2_10_3_MASK_ (0xFF)
+
+#define OTP_PRGM_DATA (0x1010)
+
+#define OTP_PRGM_MODE (0x1014)
+#define OTP_PRGM_MODE_BYTE_ BIT(0)
+
+#define OTP_TST_CMD (0x1024)
+#define OTP_TST_CMD_PRGVRFY_ BIT(3)
+
+#define OTP_CMD_GO (0x1028)
+#define OTP_CMD_GO_GO_ BIT(0)
+
+#define OTP_STATUS (0x1030)
+#define OTP_STATUS_BUSY_ BIT(0)
+
/* MAC statistics registers */
#define STAT_RX_FCS_ERRORS (0x1200)
#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
+#define STAT_RX_FRAGMENT_ERRORS (0x1208)
#define STAT_RX_JABBER_ERRORS (0x120C)
#define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210)
#define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214)
@@ -298,12 +484,26 @@
#define STAT_RX_UNICAST_BYTE_COUNT (0x121C)
#define STAT_RX_BROADCAST_BYTE_COUNT (0x1220)
#define STAT_RX_MULTICAST_BYTE_COUNT (0x1224)
+#define STAT_RX_UNICAST_FRAMES (0x1228)
+#define STAT_RX_BROADCAST_FRAMES (0x122C)
#define STAT_RX_MULTICAST_FRAMES (0x1230)
+#define STAT_RX_PAUSE_FRAMES (0x1234)
+#define STAT_RX_64_BYTE_FRAMES (0x1238)
+#define STAT_RX_65_127_BYTE_FRAMES (0x123C)
+#define STAT_RX_128_255_BYTE_FRAMES (0x1240)
+#define STAT_RX_256_511_BYTES_FRAMES (0x1244)
+#define STAT_RX_512_1023_BYTE_FRAMES (0x1248)
+#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C)
+#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250)
#define STAT_RX_TOTAL_FRAMES (0x1254)
+#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258)
+#define STAT_EEE_RX_LPI_TIME (0x125C)
+#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C)
#define STAT_TX_FCS_ERRORS (0x1280)
#define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284)
#define STAT_TX_CARRIER_ERRORS (0x1288)
+#define STAT_TX_BAD_BYTE_COUNT (0x128C)
#define STAT_TX_SINGLE_COLLISIONS (0x1290)
#define STAT_TX_MULTIPLE_COLLISIONS (0x1294)
#define STAT_TX_EXCESSIVE_COLLISION (0x1298)
@@ -311,8 +511,21 @@
#define STAT_TX_UNICAST_BYTE_COUNT (0x12A0)
#define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4)
#define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8)
+#define STAT_TX_UNICAST_FRAMES (0x12AC)
+#define STAT_TX_BROADCAST_FRAMES (0x12B0)
#define STAT_TX_MULTICAST_FRAMES (0x12B4)
+#define STAT_TX_PAUSE_FRAMES (0x12B8)
+#define STAT_TX_64_BYTE_FRAMES (0x12BC)
+#define STAT_TX_65_127_BYTE_FRAMES (0x12C0)
+#define STAT_TX_128_255_BYTE_FRAMES (0x12C4)
+#define STAT_TX_256_511_BYTES_FRAMES (0x12C8)
+#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC)
+#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0)
+#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4)
#define STAT_TX_TOTAL_FRAMES (0x12D8)
+#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC)
+#define STAT_EEE_TX_LPI_TIME (0x12E0)
+#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC)
/* End of Register definitions */
@@ -415,8 +628,12 @@ struct lan743x_tx_buffer_info;
#define TX_FRAME_FLAG_IN_PROGRESS BIT(0)
+#define TX_TS_FLAG_TIMESTAMPING_ENABLED BIT(0)
+#define TX_TS_FLAG_ONE_STEP_SYNC BIT(1)
+
struct lan743x_tx {
struct lan743x_adapter *adapter;
+ u32 ts_flags;
u32 vector_flags;
int channel_number;
@@ -443,6 +660,10 @@ struct lan743x_tx {
struct sk_buff *overflow_skb;
};
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync);
+
/* RX */
struct lan743x_rx_descriptor;
struct lan743x_rx_buffer_info;
@@ -473,6 +694,9 @@ struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
int msg_enable;
+#ifdef CONFIG_PM
+ u32 wolopts;
+#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
struct lan743x_intr intr;
@@ -480,6 +704,9 @@ struct lan743x_adapter {
/* lock, used to prevent concurrent access to data port */
struct mutex dp_lock;
+ struct lan743x_gpio gpio;
+ struct lan743x_ptp ptp;
+
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
@@ -530,6 +757,7 @@ struct lan743x_adapter {
#define TX_DESC_DATA0_IPE_ (0x00200000)
#define TX_DESC_DATA0_TPE_ (0x00100000)
#define TX_DESC_DATA0_FCS_ (0x00020000)
+#define TX_DESC_DATA0_TSE_ (0x00010000)
#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF)
#define TX_DESC_DATA0_EXT_LSO_ (0x00200000)
#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF)
@@ -543,6 +771,7 @@ struct lan743x_tx_descriptor {
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+#define TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED BIT(1)
#define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2)
#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3)
struct lan743x_tx_buffer_info {
@@ -594,4 +823,7 @@ struct lan743x_rx_buffer_info {
#define RX_PROCESS_RESULT_PACKET_RECEIVED (1)
#define RX_PROCESS_RESULT_PACKET_DROPPED (2)
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
new file mode 100644
index 000000000000..64dba96edc79
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -0,0 +1,1160 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include "lan743x_main.h"
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+
+#include "lan743x_ptp.h"
+
+#define LAN743X_NUMBER_OF_GPIO (12)
+#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
+#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter);
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter);
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds);
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+
+ spin_lock_init(&gpio->gpio_lock);
+
+ gpio->gpio_cfg0 = 0; /* set all directions to input, data = 0 */
+ gpio->gpio_cfg1 = 0x0FFF0000; /* disable all GPIOs, set to open drain */
+ gpio->gpio_cfg2 = 0; /* set all 1588 polarities to low level */
+ gpio->gpio_cfg3 = 0; /* disable all 1588 outputs */
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ return 0;
+}
+
+static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
+ u32 bit_mask)
+{
+ int timeout = 1000;
+ u32 data = 0;
+
+ while (timeout &&
+ (data = (lan743x_csr_read(adapter, PTP_CMD_CTL) &
+ bit_mask))) {
+ usleep_range(1000, 20000);
+ timeout--;
+ }
+ if (data) {
+ netif_err(adapter, drv, adapter->netdev,
+ "timeout waiting for cmd to be done, cmd = 0x%08X\n",
+ bit_mask);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 header)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_seconds_queue[ptp->tx_ts_queue_size] = seconds;
+ ptp->tx_ts_nseconds_queue[ptp->tx_ts_queue_size] = nano_seconds;
+ ptp->tx_ts_header_queue[ptp->tx_ts_queue_size] = header;
+ ptp->tx_ts_queue_size++;
+ } else {
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts queue overflow\n");
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_tx_ts_complete(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct skb_shared_hwtstamps tstamps;
+ u32 header, nseconds, seconds;
+ bool ignore_sync = false;
+ struct sk_buff *skb;
+ int c, i;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ c = ptp->tx_ts_skb_queue_size;
+
+ if (c > ptp->tx_ts_queue_size)
+ c = ptp->tx_ts_queue_size;
+ if (c <= 0)
+ goto done;
+
+ for (i = 0; i < c; i++) {
+ ignore_sync = ((ptp->tx_ts_ignore_sync_queue &
+ BIT(i)) != 0);
+ skb = ptp->tx_ts_skb_queue[i];
+ nseconds = ptp->tx_ts_nseconds_queue[i];
+ seconds = ptp->tx_ts_seconds_queue[i];
+ header = ptp->tx_ts_header_queue[i];
+
+ memset(&tstamps, 0, sizeof(tstamps));
+ tstamps.hwtstamp = ktime_set(seconds, nseconds);
+ if (!ignore_sync ||
+ ((header & PTP_TX_MSG_HEADER_MSG_TYPE_) !=
+ PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_))
+ skb_tstamp_tx(skb, &tstamps);
+
+ dev_kfree_skb(skb);
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+
+ /* shift queue */
+ ptp->tx_ts_ignore_sync_queue >>= c;
+ for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) {
+ ptp->tx_ts_skb_queue[i - c] = ptp->tx_ts_skb_queue[i];
+ ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i];
+ ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i];
+ ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i];
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+ ptp->tx_ts_skb_queue_size -= c;
+ ptp->tx_ts_queue_size -= c;
+done:
+ ptp->pending_tx_timestamps -= c;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int result = -ENODEV;
+ int index = 0;
+
+ mutex_lock(&ptp->command_lock);
+ for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) {
+ if (!(test_bit(index, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(index);
+ result = index;
+ break;
+ }
+ }
+ mutex_unlock(&ptp->command_lock);
+ return result;
+}
+
+static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (test_bit(event_channel, &ptp->used_event_ch)) {
+ ptp->used_event_ch &= ~BIT(event_channel);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted release on a not used event_channel = %d\n",
+ event_channel);
+ }
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds);
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns);
+
+static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
+ int bit, int ptp_channel)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(bit);
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+
+ if (!(gpio->used_bits & bit_mask)) {
+ gpio->used_bits |= bit_mask;
+ gpio->output_bits |= bit_mask;
+ gpio->ptp_bits |= bit_mask;
+
+ /* set as output, and zero initial value */
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* enable gpio, and set buffer type to push pull */
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* set 1588 polarity to high */
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+
+ if (!ptp_channel) {
+ /* use channel A */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ } else {
+ /* use channel B */
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ }
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ ret = bit;
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+ return ret;
+}
+
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(bit);
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+ if (gpio->used_bits & bit_mask) {
+ gpio->used_bits &= ~bit_mask;
+ if (gpio->output_bits & bit_mask) {
+ gpio->output_bits &= ~bit_mask;
+
+ if (gpio->ptp_bits & bit_mask) {
+ gpio->ptp_bits &= ~bit_mask;
+ /* disable ptp output */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG3,
+ gpio->gpio_cfg3);
+ }
+ /* release gpio output */
+
+ /* disable gpio */
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* reset back to input */
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ }
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+}
+
+static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ bool positive = true;
+ u64 u64_delta = 0;
+
+ if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) ||
+ scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) {
+ return -EINVAL;
+ }
+ if (scaled_ppm > 0) {
+ u64_delta = (u64)scaled_ppm;
+ positive = true;
+ } else {
+ u64_delta = (u64)(-scaled_ppm);
+ positive = false;
+ }
+ u64_delta = (u64_delta << 19);
+ lan743x_rate_adj = div_u64(u64_delta, 1000000);
+
+ if (positive)
+ lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
+
+static int lan743x_ptpci_adjfreq(struct ptp_clock_info *ptpci, s32 delta_ppb)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ bool positive = true;
+ u32 u32_delta = 0;
+ u64 u64_delta = 0;
+
+ if ((delta_ppb < (-LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB)) ||
+ delta_ppb > LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB) {
+ return -EINVAL;
+ }
+ if (delta_ppb > 0) {
+ u32_delta = (u32)delta_ppb;
+ positive = true;
+ } else {
+ u32_delta = (u32)(-delta_ppb);
+ positive = false;
+ }
+ u64_delta = (((u64)u32_delta) << 35);
+ lan743x_rate_adj = div_u64(u64_delta, 1000000000);
+
+ if (positive)
+ lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
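
Both rate-adjustment callbacks program PTP_CLOCK_RATE_ADJ in the same hardware unit, and the two scalings agree: with scaled_ppm = ppm * 2^16 and delta_ppb = ppm * 10^3, adjfine yields ppm * 2^16 * 2^19 / 10^6 = ppm * 2^35 / 10^6 and adjfreq yields ppm * 10^3 * 2^35 / 10^9 = ppm * 2^35 / 10^6. A quick numeric check (illustrative only, not part of this patch):

    static void rate_adj_consistency_example(void)
    {
    	u64 fine = ((u64)65536 << 19);	/* scaled_ppm for exactly 1 ppm */
    	u64 freq = ((u64)1000 << 35);	/* delta_ppb for exactly 1 ppm */

    	/* both divisions evaluate to 34359 */
    	WARN_ON(div_u64(fine, 1000000) != div_u64(freq, 1000000000));
    }
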
+
+static int lan743x_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ lan743x_ptp_clock_step(adapter, delta);
+
+ return 0;
+}
+
+static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
+ return 0;
+}
+
+static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ if (ts) {
+ if (ts->tv_sec > 0xFFFFFFFFLL ||
+ ts->tv_sec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_sec out of range, %lld\n",
+ ts->tv_sec);
+ return -ERANGE;
+ }
+ if (ts->tv_nsec >= 1000000000L ||
+ ts->tv_nsec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_nsec out of range, %ld\n",
+ ts->tv_nsec);
+ return -ERANGE;
+ }
+ seconds = ts->tv_sec;
+ nano_seconds = ts->tv_nsec;
+ lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 general_config = 0;
+
+ if (ptp->perout_gpio_bit >= 0) {
+ lan743x_gpio_release(adapter, ptp->perout_gpio_bit);
+ ptp->perout_gpio_bit = -1;
+ }
+
+ if (ptp->perout_event_ch >= 0) {
+ /* set target to far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ 0);
+
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (ptp->perout_event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+ lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch);
+ ptp->perout_event_ch = -1;
+ }
+}
+
+static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec = 0, period_nsec = 0;
+ u32 start_sec = 0, start_nsec = 0;
+ u32 general_config = 0;
+ int pulse_width = 0;
+ int perout_bit = 0;
+
+ if (!on) {
+ lan743x_ptp_perout_off(adapter);
+ return 0;
+ }
+
+ if (ptp->perout_event_ch >= 0 ||
+ ptp->perout_gpio_bit >= 0) {
+ /* already on, turn off first */
+ lan743x_ptp_perout_off(adapter);
+ }
+
+ ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter);
+ if (ptp->perout_event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel for PEROUT\n");
+ goto failed;
+ }
+
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */
+ break;
+ case ID_REV_ID_LAN7431_:
+ perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */
+ break;
+ }
+
+ ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_bit,
+ ptp->perout_event_ch);
+
+ if (ptp->perout_gpio_bit < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve gpio %d for PEROUT\n",
+ perout_bit);
+ goto failed;
+ }
+
+ start_sec = perout->start.sec;
+ start_sec += perout->start.nsec / 1000000000;
+ start_nsec = perout->start.nsec % 1000000000;
+
+ period_sec = perout->period.sec;
+ period_sec += perout->period.nsec / 1000000000;
+ period_nsec = perout->period.nsec % 1000000000;
+
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0);
+
+ /* Configure to pulse every period */
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (ptp->perout_event_ch));
+ general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (ptp->perout_event_ch, pulse_width);
+ general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (ptp->perout_event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+
+ /* set the reload to one toggle cycle */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch),
+ period_nsec);
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ start_nsec);
+
+ return 0;
+
+failed:
+ lan743x_ptp_perout_off(adapter);
+ return -ENODEV;
+}
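
Only periodic-output index 0 is accepted (see lan743x_ptpci_enable below), and the pulse width is picked as the largest supported value that does not exceed half the requested period. A hypothetical user-space request for a 1 Hz output (the PHC node path is an assumption):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    static int request_pps_output(void)
    {
    	struct ptp_perout_request req;
    	int fd = open("/dev/ptp0", O_RDWR);	/* assumed PHC node */

    	if (fd < 0)
    		return -1;
    	memset(&req, 0, sizeof(req));
    	req.index = 0;		/* only channel 0 is accepted */
    	req.period.sec = 1;	/* 1 s period -> 200 ms pulse width */
    	return ioctl(fd, PTP_PEROUT_REQUEST, &req);
    }
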
+
+static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ if (request) {
+ switch (request->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return -EINVAL;
+ case PTP_CLK_REQ_PEROUT:
+ if (request->perout.index == 0)
+ return lan743x_ptp_perout(adapter, on,
+ &request->perout);
+ return -EINVAL;
+ case PTP_CLK_REQ_PPS:
+ return -EINVAL;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "request->type == %d, Unknown\n",
+ request->type);
+ break;
+ }
+ } else {
+ netif_err(adapter, drv, adapter->netdev, "request == NULL\n");
+ }
+ return 0;
+}
+
+static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 cap_info, cause, header, nsec, seconds;
+ bool new_timestamp_available = false;
+ int count = 0;
+
+ while ((count < 100) &&
+ (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+ count++;
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds, nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "TX TS INT but no TX TS CNT\n");
+ }
+ lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+ }
+
+ if (new_timestamp_available)
+ lan743x_ptp_tx_ts_complete(adapter);
+
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+
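+ /* a negative return value tells the PTP core not to reschedule this
+ * worker; the ISR schedules it again when the next timestamp
+ * interrupt arrives
+ */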
+ return -1;
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (seconds)
+ (*seconds) = lan743x_csr_read(adapter, PTP_CLOCK_SEC);
+
+ if (nano_seconds)
+ (*nano_seconds) = lan743x_csr_read(adapter, PTP_CLOCK_NS);
+
+ if (sub_nano_seconds)
+ (*sub_nano_seconds) =
+ lan743x_csr_read(adapter, PTP_CLOCK_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 nano_seconds_step = 0;
+ u64 abs_time_step_ns = 0;
+ u32 unsigned_seconds = 0;
+ u32 nano_seconds = 0;
+ u32 remainder = 0;
+ s32 seconds = 0;
+
+ if (time_step_ns > 15000000000LL) {
+ /* too large for a clock step; convert to a clock set */
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* too large for a clock step; convert to a clock set */
+ time_step_ns = -time_step_ns;
+
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)(time_step_ns);
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = (u32)remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = (u32)remainder;
+ if (nano_seconds > 0) {
+ /* subtracting nanoseconds directly is not allowed;
+ * convert to subtracting from the seconds count
+ * and adding to the nanoseconds count
+ */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
+ }
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
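+ /* e.g. time_step_ns = -300 becomes seconds = -1 and
+ * nano_seconds = 999999708 after the 8 ns of slack: the hardware
+ * then steps back one whole second and forward almost a full second
+ * of nanoseconds.
+ */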
+
+ while (seconds) {
+ mutex_lock(&ptp->command_lock);
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ adjustment_value);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ adjustment_value);
+ seconds += ((s32)adjustment_value);
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+ if (nano_seconds) {
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ (nano_seconds &
+ PTP_CLOCK_STEP_ADJ_VALUE_MASK_));
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+}
+
+void lan743x_ptp_isr(void *context)
+{
+ struct lan743x_adapter *adapter = (struct lan743x_adapter *)context;
+ struct lan743x_ptp *ptp = NULL;
+ int enable_flag = 1;
+ u32 ptp_int_sts = 0;
+
+ ptp = &adapter->ptp;
+
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ ptp_int_sts &= lan743x_csr_read(adapter, PTP_INT_EN_SET);
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ enable_flag = 0; /* aux worker will re-enable later */
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TX_SWTS_ERR_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "PTP TX Software Timestamp Error\n");
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_SWTS_ERR_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_B_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_B_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_A_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_A_);
+ }
+
+ if (enable_flag) {
+ /* re-enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_skb_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb;
+ if (ignore_sync)
+ ptp->tx_ts_ignore_sync_queue |=
+ BIT(ptp->tx_ts_skb_queue_size);
+ ptp->tx_ts_skb_queue_size++;
+ } else {
+ /* this should never happen, so long as the tx channel
+ * calls lan743x_ptp_request_tx_timestamp and honors
+ * its result
+ */
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts skb queue overflow\n");
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter)
+{
+ struct timespec64 ts;
+
+ memset(&ts, 0, sizeof(ts));
+ timekeeping_clocktai64(&ts);
+
+ lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0);
+}
+
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed)
+{
+ switch (link_speed) {
+ case 10:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(0) |
+ PTP_LATENCY_RX_SET_(0));
+ break;
+ case 100:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(181) |
+ PTP_LATENCY_RX_SET_(594));
+ break;
+ case 1000:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(30) |
+ PTP_LATENCY_RX_SET_(525));
+ break;
+ }
+}
+
+int lan743x_ptp_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_init(&ptp->command_lock);
+ spin_lock_init(&ptp->tx_ts_lock);
+ ptp->used_event_ch = 0;
+ ptp->perout_event_ch = -1;
+ ptp->perout_gpio_bit = -1;
+ return 0;
+}
+
+int lan743x_ptp_open(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int ret = -ENODEV;
+ u32 temp;
+
+ lan743x_ptp_reset(adapter);
+ lan743x_ptp_sync_to_system_clock(adapter);
+ temp = lan743x_csr_read(adapter, PTP_TX_MOD2);
+ temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_;
+ lan743x_csr_write(adapter, PTP_TX_MOD2, temp);
+ lan743x_ptp_enable(adapter);
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET,
+ PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_);
+ ptp->flags |= PTP_FLAG_ISR_ENABLED;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+ return 0;
+
+ snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0");
+ ptp->pin_config[0].index = 0;
+ ptp->pin_config[0].func = PTP_PF_PEROUT;
+ ptp->pin_config[0].chan = 0;
+
+ ptp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(ptp->ptp_clock_info.name, 16, "%pm",
+ adapter->netdev->dev_addr);
+ ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
+ ptp->ptp_clock_info.n_alarm = 0;
+ ptp->ptp_clock_info.n_ext_ts = 0;
+ ptp->ptp_clock_info.n_per_out = 1;
+ ptp->ptp_clock_info.n_pins = 0;
+ ptp->ptp_clock_info.pps = 0;
+ ptp->ptp_clock_info.pin_config = NULL;
+ ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
+ ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
+ ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
+ ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64;
+ ptp->ptp_clock_info.getcrosststamp = NULL;
+ ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
+ ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
+ ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
+ ptp->ptp_clock_info.verify = NULL;
+
+ ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
+ &adapter->pdev->dev);
+
+ if (IS_ERR(ptp->ptp_clock)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "ptp_clock_register failed\n");
+ goto done;
+ }
+ ptp->flags |= PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, ifup, adapter->netdev,
+ "successfully registered ptp clock\n");
+
+ return 0;
+done:
+ lan743x_ptp_close(adapter);
+ return ret;
+}
+
+void lan743x_ptp_close(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int index;
+
+ if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
+ ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, drv, adapter->netdev,
+ "ptp clock unregister\n");
+ }
+
+ if (ptp->flags & PTP_FLAG_ISR_ENABLED) {
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR,
+ PTP_INT_BIT_TX_SWTS_ERR_ |
+ PTP_INT_BIT_TX_TS_);
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+ ptp->flags &= ~PTP_FLAG_ISR_ENABLED;
+ }
+
+ /* clean up pending timestamp requests */
+ lan743x_ptp_tx_ts_complete(adapter);
+ spin_lock_bh(&ptp->tx_ts_lock);
+ for (index = 0;
+ index < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS;
+ index++) {
+ struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
+
+ if (skb)
+ dev_kfree_skb(skb);
+ ptp->tx_ts_skb_queue[index] = NULL;
+ ptp->tx_ts_seconds_queue[index] = 0;
+ ptp->tx_ts_nseconds_queue[index] = 0;
+ }
+ ptp->tx_ts_skb_queue_size = 0;
+ ptp->tx_ts_queue_size = 0;
+ ptp->pending_tx_timestamps = 0;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+
+ lan743x_ptp_disable(adapter);
+}
+
+void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter,
+ bool ts_insert_enable)
+{
+ u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD);
+
+ if (ts_insert_enable)
+ ptp_tx_mod |= PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+ else
+ ptp_tx_mod &= ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+
+ lan743x_csr_write(adapter, PTP_TX_MOD, ptp_tx_mod);
+}
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter)
+{
+ if (lan743x_csr_read(adapter, PTP_CMD_CTL) & PTP_CMD_CTL_PTP_ENABLE_)
+ return true;
+ return false;
+}
+
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already enabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (!lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already disabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_DISABLE_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Attempting reset while enabled\n");
+ goto done;
+ }
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_RESET_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_RESET_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CLOCK_SEC, seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_NS, nano_seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_SUBNS, sub_nano_seconds);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ mutex_unlock(&ptp->command_lock);
+}
+
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ bool result = false;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ /* request granted */
+ ptp->pending_tx_timestamps++;
+ result = true;
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+ return result;
+}
+
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps > 0)
+ ptp->pending_tx_timestamps--;
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "unrequest failed, pending_tx_timestamps==0\n");
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync);
+
+ lan743x_ptp_tx_ts_complete(adapter);
+}
+
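+/* Entered through SIOCSHWTSTAMP; e.g. a struct hwtstamp_config with
+ * .tx_type = HWTSTAMP_TX_ON, as requested by tools such as ptp4l, turns on
+ * two-step TX timestamping for every TX channel below (a sketch of typical
+ * usage, values illustrative).
+ */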
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret = 0;
+ int index;
+
+ if (!ifr) {
+ netif_err(adapter, drv, adapter->netdev,
+ "SIOCSHWTSTAMP, ifr == NULL\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n",
+ config.flags);
+ }
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ false, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, true);
+
+ lan743x_ptp_set_sync_ts_insert(adapter, true);
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ " tx_type = %d, UNKNOWN\n", config.tx_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+ return ret;
+}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
new file mode 100644
index 000000000000..5fc1b3cd5e33
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_PTP_H
+#define _LAN743X_PTP_H
+
+#include "linux/ptp_clock_kernel.h"
+#include "linux/netdevice.h"
+
+struct lan743x_adapter;
+
+/* GPIO */
+struct lan743x_gpio {
+ /* gpio_lock: used to prevent concurrent access to gpio settings */
+ spinlock_t gpio_lock;
+
+ int used_bits;
+ int output_bits;
+ int ptp_bits;
+ u32 gpio_cfg0;
+ u32 gpio_cfg1;
+ u32 gpio_cfg2;
+ u32 gpio_cfg3;
+};
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter);
+
+void lan743x_ptp_isr(void *context);
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync);
+int lan743x_ptp_init(struct lan743x_adapter *adapter);
+int lan743x_ptp_open(struct lan743x_adapter *adapter);
+void lan743x_ptp_close(struct lan743x_adapter *adapter);
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed);
+
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
+
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_ISR_ENABLED BIT(2)
+
+struct lan743x_ptp {
+ int flags;
+
+ /* command_lock: used to prevent concurrent ptp commands */
+ struct mutex command_lock;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_pin_desc pin_config[1];
+
+#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2)
+ unsigned long used_event_ch;
+
+ int perout_event_ch;
+ int perout_gpio_bit;
+
+ /* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
+ spinlock_t tx_ts_lock;
+ int pending_tx_timestamps;
+ struct sk_buff *tx_ts_skb_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ unsigned int tx_ts_ignore_sync_queue;
+ int tx_ts_skb_queue_size;
+ u32 tx_ts_seconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_nseconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_header_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ int tx_ts_queue_size;
+};
+
+#endif /* _LAN743X_PTP_H */
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 776a8a9be8e3..1a4f2bb48ead 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
return 0;
}
+static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
+{
+ /* Select the VID to configure */
+ ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
+ ANA_TABLES_VLANTIDX);
+ /* Set the vlan port members mask and issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
+ ANA_TABLES_VLANACCESS_CMD_WRITE,
+ ANA_TABLES_VLANACCESS);
+
+ return ocelot_vlant_wait_for_completion(ocelot);
+}
+
+static void ocelot_vlan_mode(struct ocelot_port *port,
+ netdev_features_t features)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u8 p = port->chip_port;
+ u32 val;
+
+ /* Filtering */
+ val = ocelot_read(ocelot, ANA_VLANMASK);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ val |= BIT(p);
+ else
+ val &= ~BIT(p);
+ ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+static void ocelot_vlan_port_apply(struct ocelot *ocelot,
+ struct ocelot_port *port)
+{
+ u32 val;
+
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
+ if (port->vlan_aware)
+ val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VLAN_CFG_VLAN_VID_M |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port->chip_port);
+
+ /* Drop frames with multicast source address */
+ val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
+ if (port->vlan_aware && !port->vid)
+ /* If port is vlan-aware and tagged, drop untagged and priority
+ * tagged frames.
+ */
+ val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
+ val = REW_TAG_CFG_TAG_TPID_CFG(0);
+
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG(1);
+ else
+ /* Tag all frames */
+ val |= REW_TAG_CFG_TAG_CFG(3);
+ }
+ ocelot_rmw_gix(ocelot, val,
+ REW_TAG_CFG_TAG_TPID_CFG_M |
+ REW_TAG_CFG_TAG_CFG_M,
+ REW_TAG_CFG, port->chip_port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID(port->vid);
+ ocelot_rmw_gix(ocelot, val,
+ REW_PORT_VLAN_CFG_PORT_TPID_M |
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port->chip_port);
+}
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ /* Make the port a member of the VLAN */
+ ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ /* Untagged egress vlan classification */
+ if (untagged)
+ port->vid = vid;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
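+
+/* e.g. "bridge vlan add dev swp0 vid 100 pvid untagged" reaches this
+ * function through the switchdev port-object handlers further down as
+ * ocelot_vlan_vid_add(dev, 100, true, true); "swp0" is a placeholder
+ * interface name.
+ */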
+
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* The 8021q module removes VID 0 on module unload for all interfaces
+ * with the VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ /* Forget the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ /* Egress */
+ if (port->vid == vid)
+ port->vid = 0;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
static void ocelot_vlan_init(struct ocelot *ocelot)
{
+ u16 port, vid;
+
/* Clear VLAN table, by default all ports are members of all VLANs */
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
ANA_TABLES_VLANACCESS);
ocelot_vlant_wait_for_completion(ocelot);
+
+ /* Configure the port VLAN memberships */
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ ocelot->vlan_mask[vid] = 0;
+ ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ }
+
+ /* Because VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded, but
+ * we can't rely on that since the module may not be loaded.
+ */
+ ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
+ ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
+
+ /* Set vlan ingress filter mask to all ports but the CPU port by
+ * default.
+ */
+ ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
+ ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
+ }
}
/* Watermark encode
@@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ if (!vid) {
+ if (!port->vlan_aware)
+ /* If the bridge is not VLAN aware and no VID was
+ * provided, set it to pvid to ensure the MAC entry
+ * matches incoming untagged packets.
+ */
+ vid = port->pvid;
+ else
+ /* If the bridge is VLAN aware, a VID must be provided, as
+ * otherwise the learnt entry wouldn't match any frame.
+ */
+ return -EINVAL;
+ }
+
return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
ENTRYTYPE_NORMAL);
}
@@ -690,6 +883,30 @@ end:
return ret;
}
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_add(dev, vid, false, true);
+}
+
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_del(dev, vid);
+}
+
+static int ocelot_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ocelot_vlan_mode(port, features);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_fdb_add = ocelot_fdb_add,
.ndo_fdb_del = ocelot_fdb_del,
.ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
+ .ndo_set_features = ocelot_set_features,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -780,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int ocelot_port_attr_get(struct net_device *dev,
@@ -914,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ocelot_port->vlan_aware = attr->u.vlan_filtering;
+ ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
break;
@@ -925,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev,
return err;
}
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_add(dev, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_del(dev, vid);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
const unsigned char *addr,
u16 vid)
@@ -951,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
bool new = false;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -992,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
u16 vid = mdb->vid;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1024,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_obj_add_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
trans);
@@ -1041,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_vlan_del_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
@@ -1086,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
+ ocelot_port->vlan_aware = 0;
+ ocelot_port->pvid = 0;
+ ocelot_port->vid = 0;
+}
+
+static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
+{
+ int i, port, lag;
+
+ /* Reset destination and aggregation PGIDS */
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++)
+ ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_PGID_PGID, i);
+
+ /* Now, set PGIDs for each LAG */
+ for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ unsigned long bond_mask;
+ int aggr_count = 0;
+ u8 aggr_idx[16];
+
+ bond_mask = ocelot->lags[lag];
+ if (!bond_mask)
+ continue;
+
+ for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ /* Destination mask */
+ ocelot_write_rix(ocelot, bond_mask,
+ ANA_PGID_PGID, port);
+ aggr_idx[aggr_count] = port;
+ aggr_count++;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++) {
+ u32 ac;
+
+ ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
+ ac &= ~bond_mask;
+ ac |= BIT(aggr_idx[i % aggr_count]);
+ ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
+ }
+ }
+}
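+
+/* e.g. a single bond over ports 1 and 2 gives bond_mask = 0b0110 and
+ * aggr_idx = {1, 2}, so successive aggregation codes alternate between
+ * BIT(1) and BIT(2), spreading flows across the two LAG members.
+ */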
+
+static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
+{
+ unsigned long bond_mask = ocelot->lags[lag];
+ unsigned int p;
+
+ for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
+ u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+
+ /* Use the lag index as the logical port id for port p */
+ ocelot_write_gix(ocelot, port_cfg |
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG, p);
+ }
+}
+
+static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ int lag, lp;
+ struct net_device *ndev;
+ u32 bond_mask = 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(bond, ndev) {
+ struct ocelot_port *port = netdev_priv(ndev);
+
+ bond_mask |= BIT(port->chip_port);
+ }
+ rcu_read_unlock();
+
+ lp = __ffs(bond_mask);
+
+ /* If the new port is the lowest one, use it as the logical port from
+ * now on
+ */
+ if (p == lp) {
+ lag = p;
+ ocelot->lags[p] = bond_mask;
+ bond_mask &= ~BIT(p);
+ if (bond_mask) {
+ lp = __ffs(bond_mask);
+ ocelot->lags[lp] = 0;
+ }
+ } else {
+ lag = lp;
+ ocelot->lags[lp] |= BIT(p);
+ }
+
+ ocelot_setup_lag(ocelot, lag);
+ ocelot_set_aggr_pgids(ocelot);
+
+ return 0;
+}
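+
+/* e.g. enslaving ports 1 and 3 to the same bond: port 1 is the lowest
+ * member and becomes the logical port, leaving ocelot->lags[1] = 0b1010
+ * and ocelot->lags[3] = 0.
+ */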
+
+static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ u32 port_cfg;
+ int i;
+
+ /* Remove port from any lag */
+ for (i = 0; i < ocelot->num_phys_ports; i++)
+ ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+
+ /* if it was the logical port of the lag, move the lag config to the
+ * next port
+ */
+ if (ocelot->lags[p]) {
+ int n = __ffs(ocelot->lags[p]);
+
+ ocelot->lags[n] = ocelot->lags[p];
+ ocelot->lags[p] = 0;
+
+ ocelot_setup_lag(ocelot, n);
+ }
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
+ ANA_PORT_PORT_CFG, p);
+
+ ocelot_set_aggr_pgids(ocelot);
}
/* Checks if the net_device instance given to us originates from our driver. */
@@ -1113,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
else
ocelot_port_bridge_leave(ocelot_port,
info->upper_dev);
+
+ ocelot_vlan_port_apply(ocelot_port->ocelot,
+ ocelot_port);
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_lag_join(ocelot_port,
+ info->upper_dev);
+ else
+ ocelot_port_lag_leave(ocelot_port,
+ info->upper_dev);
}
break;
default:
@@ -1129,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (event == NETDEV_PRECHANGEUPPER &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ extack = netdev_notifier_info_to_extack(&info->info);
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+ ret = -EINVAL;
+ goto notify;
+ }
+ }
+
if (netif_is_lag_master(dev)) {
struct net_device *slave;
struct list_head *iter;
@@ -1176,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
@@ -1187,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
goto err_register_netdev;
}
+ /* Basic L2 initialization */
+ ocelot_vlan_port_apply(ocelot, ocelot_port);
+
return 0;
err_register_netdev:
@@ -1201,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot)
int i, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(u32), GFP_KERNEL);
+ if (!ocelot->lags)
+ return -ENOMEM;
+
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 097bd12a10d4..616bec30dfa3 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -493,7 +493,7 @@ struct ocelot {
u8 num_cpu_ports;
struct ocelot_port **ports;
- u16 lags[16];
+ u32 *lags;
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 18df7d934e81..26bb3b18f3be 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+ info->tag_type = (ifh[3] & BIT(16)) >> 16;
info->vid = ifh[3] & GENMASK(11, 0);
return 0;
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 71899009c468..c26e0f70c494 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -2,8 +2,8 @@
# Exar device configuration
#
-config NET_VENDOR_EXAR
- bool "Exar devices"
+config NET_VENDOR_NETERION
+ bool "Neterion (Exar) devices"
default y
depends on PCI
---help---
@@ -11,16 +11,19 @@ config NET_VENDOR_EXAR
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Exar cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Neterion/Exar cards. If you say Y, you will be
+ asked for your specific card in the following questions.
-if NET_VENDOR_EXAR
+if NET_VENDOR_NETERION
config S2IO
- tristate "Exar Xframe 10Gb Ethernet Adapter"
+ tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+ These were originally released by S2IO, which later renamed itself
+ Neterion, so the adapters might be labeled as either one, depending
+ on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
@@ -29,11 +32,13 @@ config S2IO
will be called s2io.
config VXGE
- tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
depends on PCI
---help---
This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter.
+ I/O Virtualized Server Adapter. These were originally released by
+ Neterion, which was later acquired by Exar, so the adapters might be
+ labeled as either one, depending on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
@@ -50,4 +55,4 @@ config VXGE_DEBUG_TRACE_ALL
the vxge driver. By default only few debug trace statements are
enabled.
-endif # NET_VENDOR_EXAR
+endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 358ed6118881..398011c87643 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include "vxge-traffic.h"
@@ -1095,12 +1094,9 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
struct __vxge_hw_device *hldev;
struct list_head *p, *n;
- u16 ret;
- if (blockpool == NULL) {
- ret = 1;
- goto exit;
- }
+ if (!blockpool)
+ return;
hldev = blockpool->hldev;
@@ -1123,8 +1119,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
kfree((void *)p);
}
- ret = 0;
-exit:
+
return;
}
@@ -2260,14 +2255,11 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
dma_addr_t dma_addr;
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 req_out;
blockpool = &devh->block_pool;
if (block_addr == NULL) {
blockpool->req_out--;
- status = VXGE_HW_FAIL;
goto exit;
}
@@ -2277,7 +2269,6 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
blockpool->req_out--;
- status = VXGE_HW_FAIL;
goto exit;
}
@@ -2292,7 +2283,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
else
list_del(&entry->item);
- if (entry != NULL) {
+ if (entry) {
entry->length = length;
entry->memblock = block_addr;
entry->dma_addr = dma_addr;
@@ -2300,13 +2291,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
entry->dma_handle = dma_h;
list_add(&entry->item, &blockpool->free_block_list);
blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ }
blockpool->req_out--;
- req_out = blockpool->req_out;
exit:
return;
}
@@ -2358,7 +2346,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
struct __vxge_hw_blockpool_entry *entry = NULL;
struct __vxge_hw_blockpool *blockpool;
void *memblock = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
blockpool = &devh->block_pool;
@@ -2368,10 +2355,8 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
&dma_object->handle,
&dma_object->acc_handle);
- if (memblock == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ if (!memblock)
goto exit;
- }
dma_object->addr = pci_map_single(devh->pdev, memblock, size,
PCI_DMA_BIDIRECTIONAL);
@@ -2380,7 +2365,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
@@ -3784,17 +3768,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 2:
*data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 3:
*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 4:
*data1 |=
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index cb87fccb9f6a..2572a4b91c7c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -43,8 +43,6 @@
#include "fw.h"
#include "main.h"
-#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
-
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
@@ -441,7 +439,10 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
}
if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
- nfp_bpf_event_output(bpf, skb);
+ if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return;
}
@@ -465,3 +466,21 @@ err_unlock:
err_free:
dev_kfree_skb_any(skb);
}
+
+void
+nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ const struct cmsg_hdr *hdr = data;
+
+ if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %u!\n", len);
+ return;
+ }
+
+ if (hdr->type == CMSG_TYPE_BPF_EVENT)
+ nfp_bpf_event_output(bpf, data, len);
+ else
+ cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
+ hdr->type);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 4c7972e3db63..e4f9b7ec8528 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -51,6 +51,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_MAPS = 3,
NFP_BPF_CAP_TYPE_RANDOM = 4,
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
+ NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
};
struct nfp_bpf_cap_tlv_func {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 8a92088df0d7..eff57f7d056a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -34,10 +34,11 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include "main.h"
@@ -416,6 +417,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst,
}
static void
+__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
+ enum mul_type type, enum mul_step step, u16 breg, bool swap,
+ bool wr_both, bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_MUL_BASE |
+ FIELD_PREP(OP_MUL_A_SRC, areg) |
+ FIELD_PREP(OP_MUL_B_SRC, breg) |
+ FIELD_PREP(OP_MUL_STEP, step) |
+ FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
+ FIELD_PREP(OP_MUL_SW, swap) |
+ FIELD_PREP(OP_MUL_TYPE, type) |
+ FIELD_PREP(OP_MUL_WR_AB, wr_both) |
+ FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
+ enum mul_step step, swreg rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ u16 areg;
+ int err;
+
+ if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
+ nfp_prog->error = -EINVAL;
+ return;
+ }
+
+ if (step == MUL_LAST || step == MUL_LAST_2) {
+ /* When the type is a step type and the step number is LAST or
+ * LAST_2, the left source is used as the destination.
+ */
+ err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
+ areg = reg.dst;
+ } else {
+ err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
+ areg = reg.areg;
+ }
+
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
+}
+
+static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
bool zero, bool swap, bool wr_both,
@@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
xfer_num = round_up(len, 4) / 4;
if (src_40bit_addr)
- addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
&off);
/* Setup PREV_ALU fields to override memory read length. */
@@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static void
+wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg, bool gen_high_half)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
+ if (gen_high_half)
+ emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
+ reg_none());
+ else
+ wrp_immed(nfp_prog, dst_hi, 0);
+}
+
+static void
+wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
+}
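+
+/* The multiply unit accumulates partial products step by step: a 16x16
+ * multiply needs two steps before MUL_LAST, a 32x32 multiply four, with
+ * MUL_LAST and MUL_LAST_2 reading back the low and high 32-bit halves of
+ * the result.
+ */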
+
+static int
+wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ bool gen_high_half, bool ropnd_from_reg)
+{
+ swreg multiplier, multiplicand, dst_hi, dst_lo;
+ const struct bpf_insn *insn = &meta->insn;
+ u32 lopnd_max, ropnd_max;
+ u8 dst_reg;
+
+ dst_reg = insn->dst_reg;
+ multiplicand = reg_a(dst_reg * 2);
+ dst_hi = reg_both(dst_reg * 2 + 1);
+ dst_lo = reg_both(dst_reg * 2);
+ lopnd_max = meta->umax_dst;
+ if (ropnd_from_reg) {
+ multiplier = reg_b(insn->src_reg * 2);
+ ropnd_max = meta->umax_src;
+ } else {
+ u32 imm = insn->imm;
+
+ multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ ropnd_max = imm;
+ }
+ if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
+ wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
+ gen_high_half);
+ else
+ wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
+
+ return 0;
+}
+
+static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
+{
+ swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
+ struct reciprocal_value_adv rvalue;
+ u8 pre_shift, exp;
+ swreg magic;
+
+ if (imm > U32_MAX) {
+ wrp_immed(nfp_prog, dst_both, 0);
+ return 0;
+ }
+
+ /* NOTE: because we are using "reciprocal_value_adv", which doesn't
+ * support a divisor > (1u << 31), we need to JIT a separate NFP
+ * sequence for that case. Since the 32-bit dividend is then always
+ * less than twice the divisor, the quotient equals the result of the
+ * unsigned comparison "dst >= imm", which can be calculated with the
+ * following NFP sequence:
+ *
+ * alu[--, dst, -, imm]
+ * immed[imm, 0]
+ * alu[dst, imm, +carry, 0]
+ */
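+ /* e.g. for imm = 0x80000001: dst = 0x90000000 divides to 1 and the
+ * subtract leaves the carry set, while dst = 0x7fffffff divides to 0
+ * and clears it, so adding the carry into zero yields the quotient.
+ */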
+ if (imm > 1U << 31) {
+ swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+
+ emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
+ wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
+ emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
+ reg_imm(0));
+ return 0;
+ }
+
+ rvalue = reciprocal_value_adv(imm, 32);
+ exp = rvalue.exp;
+ if (rvalue.is_wide_m && !(imm & 1)) {
+ pre_shift = fls(imm & -imm) - 1;
+ rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
+ } else {
+ pre_shift = 0;
+ }
+ magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
+ if (imm == 1U << exp) {
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, exp);
+ } else if (rvalue.is_wide_m) {
+ wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
+ magic, true);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, 1);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, rvalue.sh - 1);
+ } else {
+ if (pre_shift)
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, pre_shift);
+ wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, rvalue.sh);
+ }
+
+ return 0;
+}
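+
+/* e.g. imm = 3: reciprocal_value_adv(3, 32) gives m = 0xaaaaaaab, sh = 1,
+ * is_wide_m = false, so the final branch above emits a 32x32 multiply that
+ * keeps the high half followed by a right shift of 1; for dst = 9 this is
+ * high32(9 * 0xaaaaaaab) = 6, then 6 >> 1 = 3.
+ */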
+
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
@@ -1460,6 +1642,51 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 ret_einval, end;
+ swreg plen, delta;
+
+ BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));
+
+ plen = imm_a(nfp_prog);
+ delta = reg_a(2 * 2);
+
+ ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
+ end = nfp_prog_current_offset(nfp_prog) + 11;
+
+ /* Calculate resulting length */
+ emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
+ /* delta == 0 is not allowed by the kernel, so the add must overflow
+ * (set carry) to make the length smaller; take the -EINVAL exit on
+ * carry clear.
+ */
+ emit_br(nfp_prog, BR_BCC, ret_einval, 0);
+
+ /* if (new_len < 14) then -EINVAL */
+ emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
+ emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_ADD, delta);
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_ADD, delta);
+
+ emit_br(nfp_prog, BR_UNC, end, 2);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+ return -EINVAL;
+
+ wrp_immed(nfp_prog, reg_both(0), -22);
+ wrp_immed(nfp_prog, reg_both(1), ~0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+ return -EINVAL;
+
+ return 0;
+}
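+
+/* JITed body of the eBPF helper bpf_xdp_adjust_tail(ctx, delta); e.g. an
+ * XDP program shrinking a frame with bpf_xdp_adjust_tail(ctx, -4) runs the
+ * sequence above and gets -EINVAL if the result would be shorter than an
+ * Ethernet header.
+ */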
+
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1684,6 +1911,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, true);
+}
+
+static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, false);
+}
+
+static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
+}
+
+static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ /* NOTE: verifier hook has rejected cases for which verifier doesn't
+ * know whether the source operand is constant or not.
+ */
+ return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
+}
+
static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1772,8 +2024,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shl_imm64(nfp_prog, dst, umin);
@@ -1881,8 +2133,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shr_imm64(nfp_prog, dst, umin);
@@ -1995,8 +2247,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __ashr_imm64(nfp_prog, dst, umin);
@@ -2097,6 +2349,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
+static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, true);
+}
+
+static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, false);
+}
+
+static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_reg64(nfp_prog, meta);
+}
+
+static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_imm64(nfp_prog, meta);
+}
+
static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
u8 dst = meta->insn.dst_reg * 2;
@@ -2814,6 +3086,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_xdp_adjust_tail:
+ return adjust_tail(nfp_prog, meta);
case BPF_FUNC_map_lookup_elem:
case BPF_FUNC_map_update_elem:
case BPF_FUNC_map_delete_elem:
@@ -2848,6 +3122,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
+ [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
+ [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
+ [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
[BPF_ALU64 | BPF_NEG] = neg_reg64,
[BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
@@ -2867,6 +3145,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
+ [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
+ [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
+ [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
@@ -3299,7 +3581,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
return false;
- if (ld_meta->ptr.type != PTR_TO_PACKET)
+ if (ld_meta->ptr.type != PTR_TO_PACKET &&
+ ld_meta->ptr.type != PTR_TO_MAP_VALUE)
return false;
if (st_meta->ptr.type != PTR_TO_PACKET)
@@ -3647,6 +3930,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
struct nfp_insn_meta *meta1, *meta2;
struct nfp_bpf_map *nfp_map;
struct bpf_map *map;
+ u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
if (meta1->skip || meta2->skip)
@@ -3658,11 +3942,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
map = (void *)(unsigned long)((u32)meta1->insn.imm |
(u64)meta2->insn.imm << 32);
- if (bpf_map_offload_neutral(map))
- continue;
- nfp_map = map_to_offmap(map)->dev_priv;
+ if (bpf_map_offload_neutral(map)) {
+ id = map->id;
+ } else {
+ nfp_map = map_to_offmap(map)->dev_priv;
+ id = nfp_map->tid;
+ }
- meta1->insn.imm = nfp_map->tid;
+ meta1->insn.imm = id;
meta2->insn.imm = 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 40216d56dddc..970af07f4656 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -45,8 +45,8 @@
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
.nelem_hint = 4,
- .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
- .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr),
+ .key_len = FIELD_SIZEOF(struct bpf_map, id),
+ .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id),
.head_offset = offsetof(struct nfp_bpf_neutral_map, l),
.automatic_shrinking = true,
};
@@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
- int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
- xdp_running = running && nn->dp.bpf_offload_xdp;
+ xdp_running = running && nn->xdp_hw.prog;
if (!prog && !xdp_running)
return 0;
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running, extack);
- /* Stop offload if replace not possible */
- if (ret)
- return ret;
-
- nn->dp.bpf_offload_xdp = !!prog;
- return ret;
+ return nfp_net_bpf_offload(nn, prog, running, extack);
}
static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
@@ -202,14 +195,11 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
- if (tcf_block_shared(f->block))
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_bpf_setup_tc_block_cb,
- nn, nn);
+ nn, nn, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_bpf_setup_tc_block_cb,
@@ -344,6 +334,14 @@ nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
return 0;
}
+static int
+nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ bpf->adjust_tail = true;
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -390,6 +388,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
+ if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
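For context, the switch above sits in a TLV walk over the FW capability memory. A hedged sketch of that loop shape (bounds handling simplified; the exact record stride is defined by the firmware ABI):

	while (mem - start + 8 <= size) {
		u32 type = readl(mem);		/* capability TLV header */
		u32 length = readl(mem + 4);
		void __iomem *value = mem + 8;

		switch (type) {
		/* per-capability parsers, as in the hunk above */
		default:
			break;			/* skip unknown capabilities */
		}
		mem += 8 + length;		/* advance to the next TLV */
	}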
@@ -411,6 +414,20 @@ err_release_free:
return -EINVAL;
}
+static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
+}
+
+static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
+}
+
static int nfp_bpf_init(struct nfp_app *app)
{
struct nfp_app_bpf *bpf;
@@ -434,6 +451,11 @@ static int nfp_bpf_init(struct nfp_app *app)
if (err)
goto err_free_neutral_maps;
+ bpf->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
+ if (err)
+ goto err_free_neutral_maps;
+
return 0;
err_free_neutral_maps:
@@ -452,6 +474,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
+ bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
@@ -473,10 +496,14 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
+ .ndo_init = nfp_bpf_ndo_init,
+ .ndo_uninit = nfp_bpf_ndo_uninit,
+
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+ .ctrl_msg_rx_raw = nfp_bpf_ctrl_msg_rx_raw,
.setup_tc = nfp_bpf_setup_tc,
.bpf = nfp_ndo_bpf,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 654fe7823e5e..dbd00982fd2b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -47,6 +47,8 @@
#include "../nfp_asm.h"
#include "fw.h"
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
@@ -110,6 +112,8 @@ enum pkt_vec {
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
*
+ * @bpf_dev: BPF offload device handle
+ *
* @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed
@@ -146,10 +150,13 @@ enum pkt_vec {
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
* @queue_select: BPF can set the RX queue ID in packet vector
+ * @adjust_tail:	BPF can simply truncate the packet size for adjust_tail
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ struct bpf_offload_dev *bpf_dev;
+
DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next;
u16 tag_alloc_last;
@@ -189,6 +196,7 @@ struct nfp_app_bpf {
bool pseudo_random;
bool queue_select;
+ bool adjust_tail;
};
enum nfp_bpf_map_use {
@@ -217,6 +225,7 @@ struct nfp_bpf_map {
struct nfp_bpf_neutral_map {
struct rhash_head l;
struct bpf_map *ptr;
+ u32 map_id;
u32 count;
};
@@ -263,8 +272,10 @@ struct nfp_bpf_reg_state {
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
- * @umin: copy of core verifier umin_value.
- * @umax: copy of core verifier umax_value.
+ * @umin_src: copy of core verifier umin_value for src operand.
+ * @umax_src: copy of core verifier umax_value for src operand.
+ * @umin_dst: copy of core verifier umin_value for dst operand.
+ * @umax_dst: copy of core verifier umax_value for dst operand.
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -300,12 +311,15 @@ struct nfp_insn_meta {
struct bpf_reg_state arg1;
struct nfp_bpf_reg_state arg2;
};
- /* We are interested in range info for some operands,
- * for example, the shift amount.
+ /* We are interested in range info for operands of ALU
+ * operations, for example the shift amount, the multiplicand
+ * and the multiplier.
*/
struct {
- u64 umin;
- u64 umax;
+ u64 umin_src;
+ u64 umax_src;
+ u64 umin_dst;
+ u64 umax_dst;
};
};
unsigned int off;
@@ -339,6 +353,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
+}
+
static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
@@ -384,23 +403,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
-static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
- u8 code = meta->insn.code;
- bool is_alu, is_shift;
- u8 opclass, opcode;
-
- opclass = BPF_CLASS(code);
- is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
- if (!is_alu)
- return false;
-
- opcode = BPF_OP(code);
- is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
- if (!is_shift)
- return false;
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
+}
- return BPF_SRC(code) == BPF_X;
+static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
+{
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
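These helpers lean on the standard opcode-field macros from the BPF uapi headers; purely as an illustration of the decomposition:

	u8 code = meta->insn.code;
	u8 class = BPF_CLASS(code);	/* BPF_ALU or BPF_ALU64 for ALU insns */
	u8 op    = BPF_OP(code);	/* e.g. BPF_MUL, BPF_DIV, BPF_LSH */
	u8 src   = BPF_SRC(code);	/* BPF_X (register) or BPF_K (immediate) */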
/**
@@ -496,7 +506,11 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ unsigned int len);
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
+ unsigned int len);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 7eae4c0266f8..1ccd6371a15b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
ASSERT_RTNL();
/* Reuse path - other offloaded program is already tracking this map. */
- record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params);
if (record) {
nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
@@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
}
record->ptr = map;
+ record->map_id = map->id;
record->count = 1;
err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
@@ -190,8 +191,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
meta->insn = prog[i];
meta->n = i;
- if (is_mbpf_indir_shift(meta))
- meta->umin = U64_MAX;
+ if (is_mbpf_alu(meta)) {
+ meta->umin_src = U64_MAX;
+ meta->umin_dst = U64_MAX;
+ }
list_add_tail(&meta->l, &nfp_prog->insns);
}
@@ -377,11 +380,23 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
bpf->maps.max_elems - bpf->map_elems_in_use);
return -ENOMEM;
}
- if (offmap->map.key_size > bpf->maps.max_key_sz ||
- offmap->map.value_size > bpf->maps.max_val_sz ||
- round_up(offmap->map.key_size, 8) +
+
+ if (round_up(offmap->map.key_size, 8) +
round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
- pr_info("elements don't fit in device constraints\n");
+ pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8),
+ bpf->maps.max_elem_sz);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz) {
+ pr_info("map key size %u, FW max is %u\n",
+ offmap->map.key_size, bpf->maps.max_key_sz);
+ return -ENOMEM;
+ }
+ if (offmap->map.value_size > bpf->maps.max_val_sz) {
+ pr_info("map value size %u, FW max is %u\n",
+ offmap->map.value_size, bpf->maps.max_val_sz);
return -ENOMEM;
}
@@ -451,43 +466,43 @@ nfp_bpf_perf_event_copy(void *dst, const void *src,
return 0;
}
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ unsigned int len)
{
- struct cmsg_bpf_event *cbe = (void *)skb->data;
- u32 pkt_size, data_size;
- struct bpf_map *map;
+ struct cmsg_bpf_event *cbe = (void *)data;
+ struct nfp_bpf_neutral_map *record;
+ u32 pkt_size, data_size, map_id;
+ u64 map_id_full;
- if (skb->len < sizeof(struct cmsg_bpf_event))
- goto err_drop;
+ if (len < sizeof(struct cmsg_bpf_event))
+ return -EINVAL;
pkt_size = be32_to_cpu(cbe->pkt_size);
data_size = be32_to_cpu(cbe->data_size);
- map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
+ map_id_full = be64_to_cpu(cbe->map_ptr);
+ map_id = map_id_full;
- if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
- goto err_drop;
+ if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ return -EINVAL;
if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
- goto err_drop;
+ return -EINVAL;
rcu_read_lock();
- if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
- nfp_bpf_maps_neutral_params)) {
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
+ nfp_bpf_maps_neutral_params);
+ if (!record || map_id_full > U32_MAX) {
rcu_read_unlock();
- pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
- map);
- goto err_drop;
+ cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
+ map_id_full, map_id_full);
+ return -EINVAL;
}
- bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
+ bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
&cbe->data[round_up(pkt_size, 4)], data_size,
cbe->data, pkt_size, nfp_bpf_perf_event_copy);
rcu_read_unlock();
- dev_consume_skb_any(skb);
return 0;
-err_drop:
- dev_kfree_skb_any(skb);
- return -EINVAL;
}
static int
@@ -564,14 +579,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
- if (prog) {
- struct bpf_prog_offload *offload = prog->aux->offload;
-
- if (!offload)
- return -EINVAL;
- if (offload->netdev != nn->dp.netdev)
- return -EINVAL;
- }
+ if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
+ return -EINVAL;
if (prog && old_prog) {
u8 cap;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 4bfeba7b21b2..a6e9248669e1 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -178,6 +178,13 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
break;
+ case BPF_FUNC_xdp_adjust_tail:
+ if (!bpf->adjust_tail) {
+ pr_vlog(env, "adjust_tail not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
case BPF_FUNC_map_lookup_elem:
if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
bpf->helpers.map_lookup, reg1) ||
@@ -517,6 +524,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
static int
+nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *sreg =
+ cur_regs(env) + meta->insn.src_reg;
+ const struct bpf_reg_state *dreg =
+ cur_regs(env) + meta->insn.dst_reg;
+
+ meta->umin_src = min(meta->umin_src, sreg->umin_value);
+ meta->umax_src = max(meta->umax_src, sreg->umax_value);
+ meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
+ meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
+
+ /* NFP supports u16 and u32 multiplication.
+ *
+ * For ALU64, we reject the instruction if either operand is beyond
+ * the u32 value range. One thing to note: if the source operand is
+ * BPF_K, we need to check the "imm" field directly, and we reject it
+ * if it is negative, because for ALU64 "imm" (of s32 type) is sign
+ * extended to s64, which the NFP multiplier doesn't support.
+ *
+ * For ALU32, it is fine for "imm" to be negative, because the result
+ * is 32 bits and the low half is the same for signed and unsigned
+ * multiplication, so we still get the correct result.
+ */
+ if (is_mbpf_mul(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "multiplier is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
+ pr_vlog(env, "multiplicand is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_class(meta) == BPF_ALU64 &&
+ mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+
+ /* NFP doesn't have divide instructions; we support divide by constant
+ * through reciprocal multiplication. Given that NFP supports
+ * multiplication no bigger than u32, we require the divisor and
+ * dividend to be no bigger than that as well.
+ *
+ * Also, eBPF doesn't support signed divide and enforces this at the C
+ * language level by failing compilation. However, the LLVM assembler
+ * doesn't enforce it, so it is possible for a negative constant to
+ * leak in as a BPF_K operand through assembly code; we reject such
+ * cases as well.
+ */
+ if (is_mbpf_div(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "dividend is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X) {
+ if (meta->umin_src != meta->umax_src) {
+ pr_vlog(env, "divisor is not constant\n");
+ return -EINVAL;
+ }
+ if (meta->umax_src > U32_MAX) {
+ pr_vlog(env, "divisor is not within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+ if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "divide by negative constant is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
@@ -551,13 +634,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
- if (is_mbpf_indir_shift(meta)) {
- const struct bpf_reg_state *sreg =
- cur_regs(env) + meta->insn.src_reg;
-
- meta->umin = min(meta->umin, sreg->umin_value);
- meta->umax = max(meta->umax, sreg->umax_value);
- }
+ if (is_mbpf_alu(meta))
+ return nfp_bpf_check_alu(nfp_prog, meta, env);
return 0;
}
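The reciprocal-multiplication scheme the divide comments refer to can be sketched in plain C. The p = 64 magic-constant variant below is one textbook choice, not necessarily the exact constants the JIT emits:

#include <stdint.h>

/* For a 32-bit divisor d >= 2, m = floor(2^64 / d) + 1 makes
 * (n * m) >> 64 equal to n / d for every 32-bit dividend n.
 */
static uint32_t div_by_const(uint32_t n, uint32_t d)
{
	uint64_t m;

	if (d == 1)
		return n;	/* 2^64 would overflow the magic below */
	m = ~0ULL / d + 1;	/* floor(2^64 / d) + 1 */
	return (uint32_t)(((unsigned __int128)n * m) >> 64);
}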
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 4a6d2db75071..0ba0356ec4e6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -32,8 +32,10 @@
*/
#include <linux/bitfield.h>
+#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
+#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
@@ -44,6 +46,16 @@
#include "main.h"
#include "../nfp_net_repr.h"
+/* The kernel versions of TUNNEL_* are not ABI and are therefore
+ * liable to change. Such changes would break our FW ABI.
+ */
+#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
+#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
+#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+ NFP_FL_TUNNEL_KEY | \
+ NFP_FL_TUNNEL_GENEVE_OPT)
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -226,7 +238,71 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
+ const struct tc_action *action)
+{
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ int opt_len, opt_cnt, act_start, tot_push_len;
+ u8 *src = ip_tunnel_info_opts(ip_tun);
+
+ /* We need to populate the options in reverse order for HW.
+ * Therefore we first go through the options to count them and
+ * calculate the total size, then we populate them in reverse
+ * order in the action list.
+ */
+ opt_cnt = 0;
+ tot_push_len = 0;
+ opt_len = ip_tun->options_len;
+ while (opt_len > 0) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+
+ opt_cnt++;
+ if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+ return -EOPNOTSUPP;
+
+ tot_push_len += sizeof(struct nfp_fl_push_geneve) +
+ opt->length * 4;
+ if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+ return -EOPNOTSUPP;
+
+ opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ act_start = *list_len;
+ *list_len += tot_push_len;
+ src = ip_tunnel_info_opts(ip_tun);
+ while (opt_cnt) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+ struct nfp_fl_push_geneve *push;
+ size_t act_size, len;
+
+ opt_cnt--;
+ act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
+ tot_push_len -= act_size;
+ len = act_start + tot_push_len;
+
+ push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
+ push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
+ push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ push->reserved = 0;
+ push->class = opt->opt_class;
+ push->type = opt->type;
+ push->length = opt->length;
+ memcpy(&push->opt_data, opt->opt_data, opt->length * 4);
+
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ return 0;
+}
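A worked example of the reverse population above, with illustrative option sizes:

/* TC hands us options [A, B, C] back to back.  Pass 1 counts them and
 * computes tot_push_len = szA + szB + szC.  Pass 2 walks A, B, C again
 * in original order, but each write offset is act_start plus what is
 * left of tot_push_len after subtracting that option's own size, so A
 * lands at act_start + szB + szC, B at act_start + szC and C at
 * act_start -- laying the actions out as [C, B, A], the order the
 * hardware expects.
 */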
+
+static int
+nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
+ struct nfp_fl_set_ipv4_udp_tun *set_tun,
const struct tc_action *action,
struct nfp_fl_pre_tunnel *pre_tun,
enum nfp_flower_tun_type tun_type,
@@ -234,16 +310,19 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
{
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
- struct net *net;
- if (ip_tun->options_len)
+ BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
+ NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
+ NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
+ if (ip_tun->options_len &&
+ (tun_type != NFP_FL_TUNNEL_GENEVE ||
+ !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
return -EOPNOTSUPP;
- net = dev_net(netdev);
-
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -254,7 +333,42 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+
+ if (ip_tun->key.ttl) {
+ set_tun->ttl = ip_tun->key.ttl;
+ } else {
+ struct net *net = dev_net(netdev);
+ struct flowi4 flow = {};
+ struct rtable *rt;
+ int err;
+
+ /* Do a route lookup to determine ttl - if the lookup fails then
+ * use the default. Note that CONFIG_INET is a requirement of
+ * CONFIG_NET_SWITCHDEV so must be defined here.
+ */
+ flow.daddr = ip_tun->key.u.ipv4.dst;
+ flow.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(net, &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (!err) {
+ set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
+ ip_rt_put(rt);
+ } else {
+ set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ }
+ }
+
+ set_tun->tos = ip_tun->key.tos;
+
+ if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ return -EOPNOTSUPP;
+ set_tun->tun_flags = ip_tun->key.tun_flags;
+
+ if (tun_type == NFP_FL_TUNNEL_GENEVE) {
+ set_tun->tun_proto = htons(ETH_P_TEB);
+ set_tun->tun_len = ip_tun->options_len / 4;
+ }
/* Complete pre_tunnel action. */
pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -398,8 +512,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
return 0;
}
+static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case 0:
+ /* The filter doesn't force a proto match;
+ * both TCP and UDP will be updated if encountered.
+ */
+ return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
+ case IPPROTO_TCP:
+ return TCA_CSUM_UPDATE_FLAG_TCP;
+ case IPPROTO_UDP:
+ return TCA_CSUM_UPDATE_FLAG_UDP;
+ default:
+ /* All other protocols will be ignored by FW */
+ return 0;
+ }
+}
+
static int
-nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+ char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ip4_addrs set_ip_addr;
@@ -409,6 +542,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
int idx, nkeys, err;
size_t act_size;
u32 offset, cmd;
+ u8 ip_proto = 0;
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
@@ -451,6 +585,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
return err;
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *basic;
+
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ ip_proto = basic->ip_proto;
+ }
+
if (set_eth.head.len_lw) {
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
@@ -459,6 +602,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -471,18 +618,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_tport.head.len_lw) {
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
return 0;
@@ -493,12 +652,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
int err, prelag_size;
+ /* If csum_updated has not been reset by now, it means HW will
+ * incorrectly update csums when they are not requested.
+ */
+ if (*csum_updated)
+ return -EOPNOTSUPP;
+
if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -529,10 +694,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -545,14 +711,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
} else if (is_tcf_mirred_egress_redirect(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
@@ -592,9 +758,13 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
+ if (err)
+ return err;
+
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
- netdev);
+ err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
+ *tun_type, netdev);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
@@ -602,8 +772,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
/* Tunnel decap is handled by default so accept action. */
return 0;
} else if (is_tcf_pedit(a)) {
- if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+ a_len, csum_updated))
+ return -EOPNOTSUPP;
+ } else if (is_tcf_csum(a)) {
+ /* csum action requests recalc of something we have not fixed */
+ if (tcf_csum_update_flags(a) & ~*csum_updated)
return -EOPNOTSUPP;
+ /* If we will correctly fix the csum, we can remove it from the
+ * csum update list, which is later used to check support.
+ */
+ *csum_updated &= ~tcf_csum_update_flags(a);
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
@@ -620,6 +799,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
int act_len, act_cnt, err, tun_out_cnt, out_cnt;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
+ u32 csum_updated = 0;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -632,8 +812,9 @@ int nfp_flower_compile_action(struct nfp_app *app,
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
- &tun_type, &tun_out_cnt, &out_cnt);
+ err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+ netdev, &tun_type, &tun_out_cnt,
+ &out_cnt, &csum_updated);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4a7f3510a296..325954b829c8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -37,6 +37,7 @@
#include <linux/bitfield.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <net/geneve.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -51,6 +52,7 @@
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
+#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -81,6 +83,11 @@
#define NFP_FL_MAX_A_SIZ 1216
#define NFP_FL_LW_SIZ 2
+/* Maximum allowed geneve options */
+#define NFP_FL_MAX_GENEVE_OPT_ACT 32
+#define NFP_FL_MAX_GENEVE_OPT_CNT 64
+#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
@@ -94,6 +101,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_OUT_FLAGS_LAST BIT(15)
@@ -203,10 +211,22 @@ struct nfp_fl_set_ipv4_udp_tun {
__be16 reserved;
__be64 tun_id __packed;
__be32 tun_type_index;
- __be16 reserved2;
+ __be16 tun_flags;
u8 ttl;
- u8 reserved3;
- __be32 extra[2];
+ u8 tos;
+ __be32 extra;
+ u8 tun_len;
+ u8 res2;
+ __be16 tun_proto;
+};
+
+struct nfp_fl_push_geneve {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be16 class;
+ u8 type;
+ u8 length;
+ u8 opt_data[];
};
/* Metadata with L2 (1W/4B)
@@ -346,7 +366,7 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | Reserved |
+ * | Reserved | tos | ttl |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -356,10 +376,17 @@ struct nfp_flower_ipv6 {
struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be32 reserved[2];
+ __be16 reserved1;
+ u8 tos;
+ u8 ttl;
+ __be32 reserved2;
__be32 tun_id;
};
+struct nfp_flower_geneve_options {
+ u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
+};
+
#define NFP_FL_TUN_VNI_OFFSET 8
/* The base header for a control message packet.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 0c4c957717ea..bf10598f66ae 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
if (lag_upper_info &&
lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
(lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
- (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
- lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+ (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
can_offload = false;
nfp_flower_cmsg_warn(priv->app,
"Unable to offload tx_type %u hash %u\n",
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index bbe5764d26cb..85f8209bf007 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -69,11 +69,12 @@ struct nfp_app;
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
+#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
- struct timespec64 *last_used;
+ ktime_t *last_used;
u8 init_unallocated;
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 84f7a5dbea9d..a0c72f277faa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -262,6 +262,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
+static int
+nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_enc_opts *opts;
+
+ opts = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ target);
+ memcpy(key_buf, opts->data, opts->len);
+
+ return 0;
+}
+
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct tc_cls_flower_offload *flow,
@@ -270,6 +285,7 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
+ struct flow_dissector_key_ip *ip;
memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
@@ -293,6 +309,14 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
frame->ip_src = tun_ips->src;
frame->ip_dst = tun_ips->dst;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ target);
+ frame->tos = ip->tos;
+ frame->ttl = ip->ttl;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -415,6 +439,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
nfp_flow->nfp_tun_ipv4_addr = tun_dst;
nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
}
+
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ err = nfp_flower_compile_geneve_opt(ext, flow, false);
+ if (err)
+ return err;
+
+ err = nfp_flower_compile_geneve_opt(msk, flow, true);
+ if (err)
+ return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 93fb809f50d1..c098730544b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
struct nfp_flower_priv *priv = app->priv;
struct circ_buf *ring;
- struct timespec64 now;
ring = &priv->mask_ids.mask_id_free_list;
/* Checking if buffer is full. */
@@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
(NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
- getnstimeofday64(&now);
- priv->mask_ids.last_used[mask_id] = now;
+ priv->mask_ids.last_used[mask_id] = ktime_get();
return 0;
}
@@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
struct nfp_flower_priv *priv = app->priv;
- struct timespec64 delta, now;
+ ktime_t reuse_timeout;
struct circ_buf *ring;
u8 temp_id, freed_id;
@@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
*mask_id = temp_id;
- getnstimeofday64(&now);
- delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+ reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
+ NFP_FL_MASK_REUSE_TIME_NS);
- if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+ if (ktime_before(ktime_get(), reuse_timeout))
goto err_not_found;
memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
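The ktime_t conversion above follows a common quiet-period idiom; in isolation, with names shortened for illustration:

	last_used[id] = ktime_get();		/* stamp on release */

	/* on allocation: refuse to recycle until the quiet period passed */
	if (ktime_before(ktime_get(),
			 ktime_add_ns(last_used[id], NFP_FL_MASK_REUSE_TIME_NS)))
		goto err_not_found;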
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 525057bee0ed..2edab01c3beb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -66,6 +66,8 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
@@ -74,7 +76,9 @@
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
@@ -139,6 +143,21 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+ u32 *key_layer_two, int *key_size)
+{
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ return -EOPNOTSUPP;
+
+ if (enc_opts->len > 0) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
+ *key_size += sizeof(struct nfp_flower_geneve_options);
+ }
+
+ return 0;
+}
+
+static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
@@ -151,6 +170,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
u32 key_layer_two;
u8 key_layer;
int key_size;
+ int err;
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
@@ -176,6 +196,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_enc_opts *enc_op = NULL;
struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
@@ -212,11 +233,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ enc_op = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ flow->key);
+ }
+
switch (enc_ports->dst) {
case htons(NFP_FL_VXLAN_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (enc_op)
+ return -EOPNOTSUPP;
break;
case htons(NFP_FL_GENEVE_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
@@ -226,6 +257,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size += sizeof(struct nfp_flower_ext_meta);
key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (!enc_op)
+ break;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
+ return -EOPNOTSUPP;
+ err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
+ &key_size);
+ if (err)
+ return err;
break;
default:
return -EOPNOTSUPP;
@@ -584,9 +624,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
return nfp_flower_get_stats(app, netdev, flower, egress);
+ default:
+ return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
@@ -631,14 +671,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
- if (tcf_block_shared(f->block))
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_flower_setup_tc_block_cb,
- repr, repr);
+ repr, repr, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_flower_setup_tc_block_cb,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index f28b244f4ee7..8607d09ab732 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -86,6 +86,23 @@ const char *nfp_app_mip_name(struct nfp_app *app)
return nfp_mip_name(app->pf->mip);
}
+int nfp_app_ndo_init(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app || !app->type->ndo_init)
+ return 0;
+ return app->type->ndo_init(app, netdev);
+}
+
+void nfp_app_ndo_uninit(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (app && app->type->ndo_uninit)
+ app->type->ndo_uninit(app, netdev);
+}
+
u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data)
{
if (!port || !port->app || !port->app->type->port_get_stats)
@@ -155,6 +172,8 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(!apps[id]->ctrl_msg_rx && apps[id]->ctrl_msg_rx_raw))
+ return ERR_PTR(-EINVAL);
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index ee74caacb015..4e1eb3395648 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -78,6 +78,8 @@ extern const struct nfp_app_type app_abm;
* @init: perform basic app checks and init
* @clean: clean app state
* @extra_cap: extra capabilities string
+ * @ndo_init: vNIC and repr netdev .ndo_init
* @ndo_uninit: vNIC and repr netdev .ndo_uninit
* @vnic_alloc: allocate vNICs (assign port types, etc.)
* @vnic_free: free up app's vNIC state
* @vnic_init: vNIC netdev was registered
@@ -96,6 +98,7 @@ extern const struct nfp_app_type app_abm;
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
+ * @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
@@ -117,6 +120,9 @@ struct nfp_app_type {
const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
+ int (*ndo_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*ndo_uninit)(struct nfp_app *app, struct net_device *netdev);
+
int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn,
unsigned int id);
void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn);
@@ -145,6 +151,8 @@ struct nfp_app_type {
void (*stop)(struct nfp_app *app);
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
+ void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
+ unsigned int len);
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
@@ -200,6 +208,9 @@ static inline void nfp_app_clean(struct nfp_app *app)
app->type->clean(app);
}
+int nfp_app_ndo_init(struct net_device *netdev);
+void nfp_app_ndo_uninit(struct net_device *netdev);
+
static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
unsigned int id)
{
@@ -310,6 +321,11 @@ static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
return app->type->ctrl_has_meta;
}
+static inline bool nfp_app_ctrl_uses_data_vnics(struct nfp_app *app)
+{
+ return app && app->type->ctrl_msg_rx_raw;
+}
+
static inline const char *nfp_app_extra_cap(struct nfp_app *app,
struct nfp_net *nn)
{
@@ -373,6 +389,16 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
app->type->ctrl_msg_rx(app, skb);
}
+static inline void
+nfp_app_ctrl_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
+{
+ if (!app || !app->type->ctrl_msg_rx_raw)
+ return;
+
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, data, len);
+ app->type->ctrl_msg_rx_raw(app, data, len);
+}
+
static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
{
if (!app->type->eswitch_mode_get)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index f6677bc9875a..fad0e62a910c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -93,6 +93,7 @@ enum br_mask {
BR_BNE = 0x01,
BR_BMI = 0x02,
BR_BHS = 0x04,
+ BR_BCC = 0x05,
BR_BLO = 0x05,
BR_BGE = 0x08,
BR_BLT = 0x09,
@@ -426,4 +427,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
}
+enum mul_type {
+ MUL_TYPE_START = 0x00,
+ MUL_TYPE_STEP_24x8 = 0x01,
+ MUL_TYPE_STEP_16x16 = 0x02,
+ MUL_TYPE_STEP_32x32 = 0x03,
+};
+
+enum mul_step {
+ MUL_STEP_1 = 0x00,
+ MUL_STEP_NONE = MUL_STEP_1,
+ MUL_STEP_2 = 0x01,
+ MUL_STEP_3 = 0x02,
+ MUL_STEP_4 = 0x03,
+ MUL_LAST = 0x04,
+ MUL_LAST_2 = 0x05,
+};
+
+#define OP_MUL_BASE 0x0f800000000ULL
+#define OP_MUL_A_SRC 0x000000003ffULL
+#define OP_MUL_B_SRC 0x000000ffc00ULL
+#define OP_MUL_STEP 0x00000700000ULL
+#define OP_MUL_DST_AB 0x00000800000ULL
+#define OP_MUL_SW 0x00040000000ULL
+#define OP_MUL_TYPE 0x00180000000ULL
+#define OP_MUL_WR_AB 0x20000000000ULL
+#define OP_MUL_SRC_LMEXTN 0x40000000000ULL
+#define OP_MUL_DST_LMEXTN 0x80000000000ULL
+
#endif
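A hedged sketch of how such mask definitions are typically combined with FIELD_PREP() from <linux/bitfield.h> to assemble one multiply step; the operand values are illustrative, not lifted from the JIT:

	u64 insn = OP_MUL_BASE |
		   FIELD_PREP(OP_MUL_A_SRC, areg) |
		   FIELD_PREP(OP_MUL_B_SRC, breg) |
		   FIELD_PREP(OP_MUL_STEP, MUL_STEP_1) |
		   FIELD_PREP(OP_MUL_TYPE, MUL_TYPE_STEP_32x32);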
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 152283d7e59c..4a540c5e27fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -236,16 +236,20 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
int err;
pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
- if (!err)
- return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err) {
+ /* For backwards compatibility, allow all if the symbol is not found */
+ pf->limit_vfs = ~0;
+ if (err == -ENOENT)
+ return 0;
- pf->limit_vfs = ~0;
- /* Allow any setting for backwards compatibility if symbol not found */
- if (err == -ENOENT)
- return 0;
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+ return err;
+ }
- nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
- return err;
+ err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err)
+ nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err);
+ return 0;
}
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 2a71a9ffd095..439e6ffe2f05 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -250,7 +250,7 @@ struct nfp_net_tx_ring {
struct nfp_net_tx_desc *txds;
dma_addr_t dma;
- unsigned int size;
+ size_t size;
bool is_xdp;
} ____cacheline_aligned;
@@ -350,9 +350,9 @@ struct nfp_net_rx_buf {
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of transmitted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
+ * @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -364,14 +364,15 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
- unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
- dma_addr_t dma;
struct xdp_rxq_info xdp_rxq;
+
+ dma_addr_t dma;
+ size_t size;
} ____cacheline_aligned;
/**
@@ -485,7 +486,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
- * @bpf_offload_xdp: Offloaded BPF program is XDP
* @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
@@ -510,7 +510,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
- u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;
u8 rx_dma_dir;
@@ -553,8 +552,8 @@ struct nfp_net_dp {
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
- * @xdp_flags: Flags with which XDP prog was loaded
- * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes)
+ * @xdp: Information about the driver XDP program
+ * @xdp_hw: Information about the HW XDP program
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -610,8 +609,8 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
- u32 xdp_flags;
- struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
unsigned int max_tx_rings;
unsigned int max_rx_rings;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d4c27f849f9b..a8b9fbab5f73 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -53,6 +53,8 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/mm.h>
+#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -945,11 +947,10 @@ err_free:
/**
* nfp_net_tx_complete() - Handle completed TX packets
- * @tx_ring: TX ring structure
- *
- * Return: Number of completed TX descriptors
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
*/
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
+static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
@@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
/* check for last gather fragment */
if (fidx == nr_frags - 1)
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
tx_ring->txbufs[idx].dma_addr = 0;
tx_ring->txbufs[idx].skb = NULL;
@@ -1077,7 +1078,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
* @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
- * Assumes that the device is stopped
+ * Assumes that the device is stopped, must be idempotent.
*/
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
@@ -1119,7 +1120,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p++;
}
- memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
@@ -1279,13 +1280,18 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
* nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
* @rx_ring: RX ring structure
*
- * Warning: Do *not* call if ring buffers were never put on the FW freelist
- * (i.e. device was not enabled)!
+ * Assumes that the device is stopped, must be idempotent.
*/
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
unsigned int wr_idx, last_idx;
+ /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
+ * kept at cnt - 1 FL bufs.
+ */
+ if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+ return;
+
/* Move the empty entry to the end of the list */
wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
last_idx = rx_ring->cnt - 1;
@@ -1294,7 +1300,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxbufs[last_idx].dma_addr = 0;
rx_ring->rxbufs[last_idx].frag = NULL;
- memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ memset(rx_ring->rxds, 0, rx_ring->size);
rx_ring->wr_p = 0;
rx_ring->rd_p = 0;
}
@@ -1709,8 +1715,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
- if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- dp->bpf_offload_xdp) && !meta.portid) {
+ if (xdp_prog && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
int act;
@@ -1752,6 +1757,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_repr_get(nn->app, meta.portid);
+ if (unlikely(!netdev)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
@@ -1767,20 +1795,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
- if (likely(!meta.portid)) {
- netdev = dp->netdev;
- } else {
- struct nfp_net *nn;
-
- nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
- if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
- continue;
- }
- nfp_repr_inc_rx_stats(netdev, pkt_len);
- }
-
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
@@ -1828,7 +1842,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
unsigned int pkts_polled = 0;
if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, budget);
if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
@@ -2062,7 +2076,7 @@ static void nfp_ctrl_poll(unsigned long arg)
struct nfp_net_r_vector *r_vec = (void *)arg;
spin_lock_bh(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, 0);
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
@@ -2121,7 +2135,7 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- kfree(tx_ring->txbufs);
+ kvfree(tx_ring->txbufs);
if (tx_ring->txds)
dma_free_coherent(dp->dev, tx_ring->size,
@@ -2145,18 +2159,17 @@ static int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- int sz;
tx_ring->cnt = dp->txd_cnt;
- tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
- sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
- tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
if (!tx_ring->txbufs)
goto err_alloc;
@@ -2270,7 +2283,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
if (dp->netdev)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kfree(rx_ring->rxbufs);
+ kvfree(rx_ring->rxbufs);
if (rx_ring->rxds)
dma_free_coherent(dp->dev, rx_ring->size,
@@ -2293,7 +2306,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz, err;
+ int err;
if (dp->netdev) {
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
@@ -2303,14 +2316,14 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
}
rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
- sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
- rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
+ GFP_KERNEL);
if (!rx_ring->rxbufs)
goto err_alloc;
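The array_size()/kvcalloc() conversions above trade open-coded size multiplications for the overflow-safe helpers from <linux/overflow.h>; roughly, with cnt and bufs illustrative:

	/* array_size() saturates to SIZE_MAX on multiply overflow, so the
	 * following allocation fails cleanly instead of being undersized;
	 * kvcalloc() additionally falls back to vmalloc for large arrays.
	 */
	size_t sz = array_size(cnt, sizeof(struct nfp_net_rx_desc));

	bufs = kvcalloc(cnt, sizeof(*bufs), GFP_KERNEL);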
@@ -2508,6 +2521,8 @@ static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
+ *
+ * Warning: must be fully idempotent.
*/
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
@@ -3115,6 +3130,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nfp_net_netpoll(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int i;
+
+ /* nfp_net's NAPIs are statically allocated, so even if there is a
+ * race with the reconfig path this will simply try to schedule some
+ * disabled NAPI instances.
+ */
+ for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
+ napi_schedule_irqoff(&nn->r_vecs[i].napi);
+}
+#endif
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3377,14 +3407,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int
-nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
+ struct bpf_prog *prog = bpf->prog;
struct nfp_net_dp *dp;
+ int err;
+
+ if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
+ return -EBUSY;
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
+ xdp_attachment_setup(&nn->xdp, bpf);
return 0;
}
@@ -3398,38 +3432,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- return nfp_net_ring_reconfig(nn, dp, extack);
+ err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
+ if (err)
+ return err;
+
+ xdp_attachment_setup(&nn->xdp, bpf);
+ return 0;
}
-static int
-nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
- struct bpf_prog *drv_prog, *offload_prog;
int err;
- if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+ if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
return -EBUSY;
- /* Load both when no flags set to allow easy activation of driver path
- * when program is replaced by one which can't be offloaded.
- */
- drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
- offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
-
- err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
+ err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
- if (err && flags & XDP_FLAGS_HW_MODE)
- return err;
-
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- nn->xdp_prog = prog;
- nn->xdp_flags = flags;
-
+ xdp_attachment_setup(&nn->xdp_hw, bpf);
return 0;
}
@@ -3439,16 +3461,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
+ return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
- return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
- xdp->extack);
+ return nfp_net_xdp_setup_hw(nn, xdp);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
- if (nn->dp.bpf_offload_xdp)
- xdp->prog_attached = XDP_ATTACHED_HW;
- xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
- xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
- return 0;
+ return xdp_attachment_query(&nn->xdp, xdp);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&nn->xdp_hw, xdp);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
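The rewrite above is a straight conversion to the generic xdp_attachment helpers, so the driver no longer tracks prog/flags by hand. A minimal sketch of the pattern for a hypothetical driver (the mydrv names are illustrative; xdp_attachment_flags_ok(), xdp_attachment_setup() and xdp_attachment_query() are the real net/xdp.h helpers this patch adopts):

#include <linux/netdevice.h>
#include <net/xdp.h>

struct mydrv_priv {
	struct xdp_attachment_info xdp;	/* one per attachment point */
};

static int mydrv_xdp_setup(struct mydrv_priv *priv, struct netdev_bpf *bpf)
{
	/* reject flag changes relative to the currently attached prog */
	if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
		return -EBUSY;

	/* ... apply bpf->prog to the datapath here ... */

	xdp_attachment_setup(&priv->xdp, bpf);	/* records prog + flags */
	return 0;
}

static int mydrv_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct mydrv_priv *priv = netdev_priv(netdev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return mydrv_xdp_setup(priv, bpf);
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}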
@@ -3476,12 +3495,17 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
}
const struct net_device_ops nfp_net_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nfp_net_netpoll,
+#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
@@ -3840,6 +3864,9 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.mtu = NFP_NET_DEFAULT_MTU;
nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+ if (nfp_app_ctrl_uses_data_vnics(nn->app))
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;
+
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_init(nn);
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index bb63c115537d..44d3ea75d043 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -127,6 +127,7 @@
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
+#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 26d1cc4e2906..6a79c8e4a7a4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
- struct nfp_app *app;
-
- app = nfp_app_from_netdev(netdev);
- if (!app)
- return;
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
@@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
unsigned int start;
do {
@@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
return data;
}
-static unsigned int
-nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
+static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
- return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
+ return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}
static u8 *
-nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
- unsigned int tx_rings, bool repr)
+nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
int swap_off, i;
@@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i].name);
- for (i = 0; i < tx_rings; i++) {
- data = nfp_pr_et(data, "txq_%u_pkts", i);
- data = nfp_pr_et(data, "txq_%u_bytes", i);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
data = nfp_pr_et(data, "rxq_%u_pkts", i);
data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
}
return data;
}
static u64 *
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
- unsigned int rx_rings, unsigned int tx_rings)
+nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
unsigned int i;
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
*data++ = readq(mem + nfp_net_et_stats[i].off);
- for (i = 0; i < tx_rings; i++) {
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
}
return data;
@@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
- nn->dp.num_tx_rings,
+ data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
false);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
@@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
- nn->dp.num_rx_rings, nn->dp.num_tx_rings);
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
- nn->dp.num_tx_rings) +
+ nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
nfp_mac_get_stats_count(netdev) +
nfp_app_port_get_stats_count(nn->port);
default:
@@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
+ data = nfp_vnic_get_hw_stats_strings(data, 0, true);
else
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
@@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_port *port = nfp_port_from_netdev(netdev);
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
+ data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
else
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(port, data);
@@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- count = nfp_vnic_get_hw_stats_count(0, 0);
+ count = nfp_vnic_get_hw_stats_count(0);
else
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d7b712f6362f..18a09cdcd9c6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -262,6 +262,8 @@ err_port_disable:
}
const struct net_device_ops nfp_repr_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 749655c329b2..c8d0b1016a64 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp)
kfree(nfp);
}
-static void nfp6000_read_serial(struct device *dev, u8 *serial)
+static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
@@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial)
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
if (!pos) {
- memset(serial, 0, NFP_SERIAL_LEN);
- return;
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
}
pci_read_config_dword(pdev, pos + 4, &reg);
put_unaligned_be16(reg >> 16, serial + 4);
pci_read_config_dword(pdev, pos + 8, &reg);
put_unaligned_be32(reg, serial);
+
+ return 0;
}
-static u16 nfp6000_get_interface(struct device *dev)
+static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
u32 reg;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos)
- return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+ if (!pos) {
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
+ }
pci_read_config_dword(pdev, pos + 4, &reg);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index b0da3d436850..c338d539fa96 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -364,8 +364,8 @@ struct nfp_cpp_operations {
int (*init)(struct nfp_cpp *cpp);
void (*free)(struct nfp_cpp *cpp);
- void (*read_serial)(struct device *dev, u8 *serial);
- u16 (*get_interface)(struct device *dev);
+ int (*read_serial)(struct device *dev, u8 *serial);
+ int (*get_interface)(struct device *dev);
int (*area_init)(struct nfp_cpp_area *area,
u32 dest, unsigned long long address,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index ef30597aa319..73de57a09800 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
{
const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
struct nfp_cpp *cpp;
+ int ifc, err;
u32 mask[2];
u32 xpbaddr;
size_t tgt;
- int err;
cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
if (!cpp) {
@@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
cpp->op = ops;
cpp->priv = priv;
- cpp->interface = ops->get_interface(parent);
- if (ops->read_serial)
- ops->read_serial(parent, cpp->serial);
+
+ ifc = ops->get_interface(parent);
+ if (ifc < 0) {
+ err = ifc;
+ goto err_free_cpp;
+ }
+ cpp->interface = ifc;
+ if (ops->read_serial) {
+ err = ops->read_serial(parent, cpp->serial);
+ if (err)
+ goto err_free_cpp;
+ }
+
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
@@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err = device_register(&cpp->dev);
if (err < 0) {
put_device(&cpp->dev);
- goto err_dev;
+ goto err_free_cpp;
}
dev_set_drvdata(&cpp->dev, cpp);
@@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err_out:
device_unregister(&cpp->dev);
-err_dev:
+err_free_cpp:
kfree(cpp);
err_malloc:
return ERR_PTR(err);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index 37a6d7822a38..40510860341b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -207,7 +207,7 @@ nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
* nfp_nffw_info_open() - Acquire the lock on the NFFW table
* @cpp: NFP CPP handle
*
- * Return: 0, or -ERRNO
+ * Return: pointer to nfp_nffw_info object or ERR_PTR()
*/
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
{
@@ -253,10 +253,8 @@ err_free:
}
/**
- * nfp_nffw_info_release() - Release the lock on the NFFW table
+ * nfp_nffw_info_close() - Release the lock on the NFFW table and free state
* @state: NFP FW info state
- *
- * Return: 0, or -ERRNO
*/
void nfp_nffw_info_close(struct nfp_nffw_info *state)
{
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 09f674ec0f9e..76efed058f33 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -155,7 +155,6 @@ struct nixge_priv {
int tx_irq;
int rx_irq;
- u32 last_link;
/* Buffer descriptors */
struct nixge_hw_dma_bd *tx_bd_v;
@@ -504,7 +503,6 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_skb->skb = skb;
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
/* Start the transfer */
@@ -740,22 +738,12 @@ static void nixge_dma_err_handler(unsigned long data)
cur_p->phys = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
cur_p->sw_id_offset = 0;
}
for (i = 0; i < RX_BD_NUM; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
}
lp->tx_bd_ci = 0;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7cbd0174459c..1d9b0d44ddb6 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5777,7 +5777,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
(np->rx_ring_size +
np->tx_ring_size),
&np->ring_addr,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -5786,7 +5786,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
sizeof(struct ring_desc_ex) *
(np->rx_ring_size +
np->tx_ring_size),
- &np->ring_addr, GFP_ATOMIC);
+ &np->ring_addr, GFP_KERNEL);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
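The forcedeth fix above is a context change rather than a behavioural one: nv_probe() runs in process context where sleeping is allowed, so the coherent ring allocation should use GFP_KERNEL; GFP_ATOMIC forbids reclaim and makes a sizeable allocation needlessly likely to fail. A hedged sketch of the pattern (names hypothetical):

#include <linux/dma-mapping.h>

static int my_probe_alloc_ring(struct device *dev, size_t ring_bytes,
			       void **ring, dma_addr_t *ring_dma)
{
	/* probe path may sleep, so GFP_KERNEL lets the allocator reclaim */
	*ring = dma_alloc_coherent(dev, ring_bytes, ring_dma, GFP_KERNEL);
	return *ring ? 0 : -ENOMEM;
}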
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
index 31288d4ad248..862de0f3bc41 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_PCH_GBE) += pch_gbe.o
pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
-pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
+pch_gbe-y += pch_gbe_main.o
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 697e29dd4bd3..44c2f291e766 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -326,32 +326,6 @@ struct pch_gbe_regs {
#define PCH_GBE_FC_FULL 3
#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
-
-struct pch_gbe_hw;
-/**
- * struct pch_gbe_functions - HAL APi function pointer
- * @get_bus_info: for pch_gbe_hal_get_bus_info
- * @init_hw: for pch_gbe_hal_init_hw
- * @read_phy_reg: for pch_gbe_hal_read_phy_reg
- * @write_phy_reg: for pch_gbe_hal_write_phy_reg
- * @reset_phy: for pch_gbe_hal_phy_hw_reset
- * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
- * @power_up_phy: for pch_gbe_hal_power_up_phy
- * @power_down_phy: for pch_gbe_hal_power_down_phy
- * @read_mac_addr: for pch_gbe_hal_read_mac_addr
- */
-struct pch_gbe_functions {
- void (*get_bus_info) (struct pch_gbe_hw *);
- s32 (*init_hw) (struct pch_gbe_hw *);
- s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
- s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
- void (*reset_phy) (struct pch_gbe_hw *);
- void (*sw_reset_phy) (struct pch_gbe_hw *);
- void (*power_up_phy) (struct pch_gbe_hw *hw);
- void (*power_down_phy) (struct pch_gbe_hw *hw);
- s32 (*read_mac_addr) (struct pch_gbe_hw *);
-};
-
/**
* struct pch_gbe_mac_info - MAC information
* @addr[6]: Store the MAC address
@@ -394,17 +368,6 @@ struct pch_gbe_phy_info {
/*!
* @ingroup Gigabit Ether driver Layer
- * @struct pch_gbe_bus_info
- * @brief Bus information
- */
-struct pch_gbe_bus_info {
- u8 type;
- u8 speed;
- u8 width;
-};
-
-/*!
- * @ingroup Gigabit Ether driver Layer
* @struct pch_gbe_hw
* @brief Hardware information
*/
@@ -414,10 +377,8 @@ struct pch_gbe_hw {
struct pch_gbe_regs __iomem *reg;
spinlock_t miim_lock;
- const struct pch_gbe_functions *func;
struct pch_gbe_mac_info mac;
struct pch_gbe_phy_info phy;
- struct pch_gbe_bus_info bus;
};
/**
@@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
deleted file mode 100644
index 51250363566b..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "pch_gbe.h"
-#include "pch_gbe_phy.h"
-#include "pch_gbe_api.h"
-
-/* bus type values */
-#define pch_gbe_bus_type_unknown 0
-#define pch_gbe_bus_type_pci 1
-#define pch_gbe_bus_type_pcix 2
-#define pch_gbe_bus_type_pci_express 3
-#define pch_gbe_bus_type_reserved 4
-
-/* bus speed values */
-#define pch_gbe_bus_speed_unknown 0
-#define pch_gbe_bus_speed_33 1
-#define pch_gbe_bus_speed_66 2
-#define pch_gbe_bus_speed_100 3
-#define pch_gbe_bus_speed_120 4
-#define pch_gbe_bus_speed_133 5
-#define pch_gbe_bus_speed_2500 6
-#define pch_gbe_bus_speed_reserved 7
-
-/* bus width values */
-#define pch_gbe_bus_width_unknown 0
-#define pch_gbe_bus_width_pcie_x1 1
-#define pch_gbe_bus_width_pcie_x2 2
-#define pch_gbe_bus_width_pcie_x4 4
-#define pch_gbe_bus_width_32 5
-#define pch_gbe_bus_width_64 6
-#define pch_gbe_bus_width_reserved 7
-
-/**
- * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
-{
- hw->bus.type = pch_gbe_bus_type_pci_express;
- hw->bus.speed = pch_gbe_bus_speed_2500;
- hw->bus.width = pch_gbe_bus_width_pcie_x1;
-}
-
-/**
- * pch_gbe_plat_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * Negative value: Failed-EBUSY
- */
-static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
-{
- s32 ret_val;
-
- ret_val = pch_gbe_phy_get_id(hw);
- if (ret_val) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
- return ret_val;
- }
- pch_gbe_phy_init_setting(hw);
- /* Setup Mac interface option RGMII */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
- pch_gbe_phy_set_rgmii(hw);
-#endif
- return ret_val;
-}
-
-static const struct pch_gbe_functions pch_gbe_ops = {
- .get_bus_info = pch_gbe_plat_get_bus_info,
- .init_hw = pch_gbe_plat_init_hw,
- .read_phy_reg = pch_gbe_phy_read_reg_miic,
- .write_phy_reg = pch_gbe_phy_write_reg_miic,
- .reset_phy = pch_gbe_phy_hw_reset,
- .sw_reset_phy = pch_gbe_phy_sw_reset,
- .power_up_phy = pch_gbe_phy_power_up,
- .power_down_phy = pch_gbe_phy_power_down,
- .read_mac_addr = pch_gbe_mac_read_mac_addr
-};
-
-/**
- * pch_gbe_plat_init_function_pointers - Init func ptrs
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
-{
- /* Set PHY parameter */
- hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Set function pointers */
- hw->func = &pch_gbe_ops;
-}
-
-/**
- * pch_gbe_hal_setup_init_funcs - Initializes function pointers
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
-{
- if (!hw->reg) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
- return -ENOSYS;
- }
- pch_gbe_plat_init_function_pointers(hw);
- return 0;
-}
-
-/**
- * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
-{
- if (!hw->func->get_bus_info) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->get_bus_info(hw);
-}
-
-/**
- * pch_gbe_hal_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
-{
- if (!hw->func->init_hw) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->init_hw(hw);
-}
-
-/**
- * pch_gbe_hal_read_phy_reg - Reads PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The buffer to store the 16-bit read.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 *data)
-{
- if (!hw->func->read_phy_reg)
- return 0;
- return hw->func->read_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_write_phy_reg - Writes PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The value to write.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 data)
-{
- if (!hw->func->write_phy_reg)
- return 0;
- return hw->func->write_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_phy_hw_reset - Hard PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_phy_sw_reset - Soft PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->sw_reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->sw_reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_read_mac_addr - Reads MAC address
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
-{
- if (!hw->func->read_mac_addr) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->read_mac_addr(hw);
-}
-
-/**
- * pch_gbe_hal_power_up_phy - Power up PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_up_phy)
- hw->func->power_up_phy(hw);
-}
-
-/**
- * pch_gbe_hal_power_down_phy - Power down PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_down_phy)
- hw->func->power_down_phy(hw);
-}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
deleted file mode 100644
index 91ce07c8306c..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _PCH_GBE_API_H_
-#define _PCH_GBE_API_H_
-
-#include "pch_gbe_phy.h"
-
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
-
-#endif
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 731ce1e419e4..adaa0024adfe 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -17,7 +17,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
/**
* pch_gbe_stats - Stats item information
@@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev,
u32 advertising;
int ret;
- pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+ pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
@@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev,
*regs_buff++ = ioread32(&hw->reg->INT_ST + i);
/* PHY register */
for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
- pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+ pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp);
*regs_buff++ = tmp;
}
}
@@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev,
err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
if (err)
goto err_setup_tx;
- /* save the new, restore the old in order to free it,
- * then restore the new back again */
-#ifdef RINGFREE
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
- pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
- kfree(tx_old);
- kfree(rx_old);
- adapter->rx_ring = rxdr;
- adapter->tx_ring = txdr;
-#else
pch_gbe_free_rx_resources(adapter, rx_old);
pch_gbe_free_tx_resources(adapter, tx_old);
kfree(tx_old);
kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
-#endif
err = pch_gbe_up(adapter);
}
return err;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 34a1581eda95..43c0c10dfeb7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -18,7 +18,7 @@
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
@@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_DMA_ALIGN 0
#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */
-#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
@@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define MINNOW_PHY_RESET_GPIO 13
-static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
-
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
@@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
* Returns:
* 0: Successful.
*/
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
u32 adr1a, adr1b;
@@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
/* Read the MAC address. and store to the private data */
pch_gbe_mac_read_mac_addr(hw);
iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
-#ifdef PCH_GBE_MAC_IFOP_RGMII
iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
-#endif
pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
/* Setup the receive addresses */
pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
@@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
-
-/**
- * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
- * @hw: Pointer to the HW structure
- * @mc_addr_list: Array of multicast addresses to program
- * @mc_addr_count: Number of multicast addresses to program
- * @mar_used_count: The first MAC Address register free to program
- * @mar_total_num: Total number of supported MAC Address Registers
- */
-static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 mar_used_count, u32 mar_total_num)
-{
- u32 i, adrmask;
-
- /* Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = mar_used_count; i < mar_total_num; i++) {
- if (mc_addr_count) {
- pch_gbe_mac_mar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- /* Clear MAC address mask */
- adrmask = ioread32(&hw->reg->ADDR_MASK);
- iowrite32((adrmask | (0x0001 << i)),
- &hw->reg->ADDR_MASK);
- /* wait busy */
- pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
- /* Clear MAC address */
- iowrite32(0, &hw->reg->mac_adr[i].high);
- iowrite32(0, &hw->reg->mac_adr[i].low);
- }
- }
-}
-
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
@@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ s32 ret_val;
- pch_gbe_mac_reset_hw(&adapter->hw);
+ pch_gbe_mac_reset_hw(hw);
/* reprogram multicast address register after reset */
pch_gbe_set_multi(netdev);
/* Setup the receive address. */
- pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
- if (pch_gbe_hal_init_hw(&adapter->hw))
- netdev_err(netdev, "Hardware Error\n");
+ pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
+
+ ret_val = pch_gbe_phy_get_id(hw);
+ if (ret_val) {
+ netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
+ return;
+ }
+ pch_gbe_phy_init_setting(hw);
+ /* Setup MAC interface option RGMII */
+ pch_gbe_phy_set_rgmii(hw);
}
/**
@@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
unsigned long rgmii = 0;
/* Set the RGMII control. */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
switch (speed) {
case SPEED_10:
rgmii = (PCH_GBE_RGMII_RATE_2_5M |
@@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
break;
}
iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#else /* GMII */
- rgmii = 0;
- iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#endif
}
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
u16 duplex)
@@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Initialize the hardware-specific values */
- if (pch_gbe_hal_setup_init_funcs(hw)) {
- netdev_err(netdev, "Hardware Initialization Failure\n");
- return -EIO;
- }
if (pch_gbe_alloc_queues(adapter)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev)
err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
if (err)
goto err_setup_rx;
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
err = pch_gbe_up(adapter);
if (err)
goto err_up;
@@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev)
err_up:
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
@@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev)
pch_gbe_down(adapter);
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
return 0;
@@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
- u8 *mta_list;
- u32 rctl;
- int i;
- int mc_count;
+ u32 rctl, adrmask;
+ int mc_count, i;
netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
- /* Check for Promiscuous and All Multicast modes */
+ /* By default enable address & multicast filtering */
rctl = ioread32(&hw->reg->RX_MODE);
+ rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
+
+ /* Promiscuous mode disables all hardware address filtering */
+ if (netdev->flags & IFF_PROMISC)
+ rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+
+ /* If we want to monitor more multicast addresses than the hardware can
+ * support then disable hardware multicast filtering.
+ */
mc_count = netdev_mc_count(netdev);
- if ((netdev->flags & IFF_PROMISC)) {
- rctl &= ~PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else if ((netdev->flags & IFF_ALLMULTI)) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
+ if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- if (mc_count >= PCH_GBE_MAR_ENTRIES) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
- }
- }
+
iowrite32(rctl, &hw->reg->RX_MODE);
- if (mc_count >= PCH_GBE_MAR_ENTRIES)
- return;
- mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC);
- if (!mta_list)
+ /* If we're not using multicast filtering then there's no point
+ * configuring the unused MAC address registers.
+ */
+ if (!(rctl & PCH_GBE_MLT_FIL_EN))
return;
- /* The shared function expects a packed array of only addresses. */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ /* Load the first set of multicast addresses into MAC address registers
+ * for use by hardware filtering.
+ */
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev)
+ pch_gbe_mac_mar_set(hw, ha->addr, i++);
+
+ /* If there are spare MAC registers, mask & clear them */
+ for (; i < PCH_GBE_MAR_ENTRIES; i++) {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
}
- pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
- PCH_GBE_MAR_ENTRIES);
- kfree(mta_list);
netdev_dbg(netdev,
"RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
@@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake up status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
} else {
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
}
@@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device)
return err;
}
pci_set_master(pdev);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake on lan control and status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
free_netdev(netdev);
}
@@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "PHY initialize error\n");
goto err_free_adapter;
}
- pch_gbe_hal_get_bus_info(&adapter->hw);
/* Read the MAC address. and store to the private data */
- ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
if (ret) {
dev_err(&pdev->dev, "MAC address Read Error\n");
goto err_free_adapter;
@@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
return 0;
err_free_adapter:
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
free_netdev(netdev);
return ret;
@@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = {
.shutdown = pch_gbe_shutdown,
.err_handler = &pch_gbe_err_handler
};
-
-
-static int __init pch_gbe_init_module(void)
-{
- int ret;
-
- pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
- ret = pci_register_driver(&pch_gbe_driver);
- if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
- if (copybreak == 0) {
- pr_info("copybreak disabled\n");
- } else {
- pr_info("copybreak enabled for packets <= %u bytes\n",
- copybreak);
- }
- }
- return ret;
-}
-
-static void __exit pch_gbe_exit_module(void)
-{
- pci_unregister_driver(&pch_gbe_driver);
-}
-
-module_init(pch_gbe_init_module);
-module_exit(pch_gbe_exit_module);
+module_pci_driver(pch_gbe_driver);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
@@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
-
/* pch_gbe_main.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index a5cad5ea9436..6b35b573beef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
* pch_gbe_phy_sw_reset - PHY software reset
* @hw: Pointer to the HW structure
*/
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
{
u16 phy_ctrl;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 95ad0151ad02..23ac38711619 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -21,12 +21,10 @@
#define PCH_GBE_PHY_REGS_LEN 32
#define PCH_GBE_PHY_RESET_DELAY_US 10
-#define PCH_GBE_MAC_IFOP_RGMII
s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index b5ea2a56106e..1df28f2edd1f 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -2,7 +2,7 @@
# Packet engine device configuration
#
-config NET_PACKET_ENGINE
+config NET_VENDOR_PACKET_ENGINES
bool "Packet Engine devices"
default y
depends on PCI
@@ -14,7 +14,7 @@ config NET_PACKET_ENGINE
the questions about packet engine devices. If you say Y, you will
be asked for your specific card in the following questions.
-if NET_PACKET_ENGINE
+if NET_VENDOR_PACKET_ENGINES
config HAMACHI
tristate "Packet Engines Hamachi GNIC-II support"
@@ -40,4 +40,4 @@ config YELLOWFIN
To compile this driver as a module, choose M here: the module
will be called yellowfin. This is recommended.
-endif # NET_PACKET_ENGINE
+endif # NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3157f97dd782..3c1be87cdfa5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -167,9 +167,9 @@ skip:
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
-
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
+ /* fall through */
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
@@ -198,6 +198,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
+ /* fall through */
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
supported |= SUPPORTED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 1cd39c9a0345..52ad80621335 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -566,9 +566,8 @@ static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct netxen_cmd_buffer *pbuf;
- struct cmd_desc_type0 *cmd_desc;
struct nx_host_tx_ring *tx_ring;
i = 0;
@@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= netxen_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
}
do {
- cmd_desc = &cmd_desc_arr[i];
-
pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
pbuf->frag_count = 0;
@@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
static int netxen_parse_md_template(struct netxen_adapter *adapter)
{
int num_of_entries, buff_level, e_cnt, esize;
- int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+ int rv = 0, sane_start = 0, sane_end = 0;
char *dbuff;
void *template_buff = adapter->mdump.md_template;
char *dump_buff = adapter->mdump.md_capture_buff;
@@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter)
break;
case RDEND:
entry->hdr.driver_flags |= NX_DUMP_SKIP;
- if (!sane_end)
- end_cnt = e_cnt;
sane_end += 1;
break;
case CNTRL:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8259e8309320..69aa7fc392c5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
u32 producer;
- int frag_count, no_of_desc;
+ int frag_count;
u32 num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
- /* 4 fragments per cmd des */
- no_of_desc = (frag_count + 3) >> 2;
if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1dfaccd151f0..a60e1c8d470a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -336,6 +336,10 @@ struct qed_hw_info {
*/
u8 num_active_tc;
u8 offload_tc;
+ bool offload_tc_set;
+
+ bool multi_tc_roce_en;
+#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en))
u32 concrete_fid;
u16 opaque_fid;
@@ -399,8 +403,8 @@ struct qed_qm_info {
u16 start_pq;
u8 start_vport;
u16 pure_lb_pq;
- u16 offload_pq;
- u16 low_latency_pq;
+ u16 first_ofld_pq;
+ u16 first_llt_pq;
u16 pure_ack_pq;
u16 ooo_pq;
u16 first_vf_pq;
@@ -881,11 +885,14 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_OFLD (BIT(5))
#define PQ_FLAGS_VFS (BIT(6))
#define PQ_FLAGS_LLT (BIT(7))
+#define PQ_FLAGS_MTC (BIT(8))
/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
@@ -921,4 +928,6 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
enum qed_mfw_tlv_type type,
union qed_mfw_tlv_data *tlv_data);
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index b5b5ff725426..f1977aa440e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1531,7 +1531,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
}
/* CM PF */
-void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
/* XCM pure-LB queue */
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index e0680ce91328..6bb76e6d3c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -208,7 +208,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
/* QM reconf data */
if (p_info->personality == personality)
- p_info->offload_tc = tc;
+ qed_hw_info_set_offload_tc(p_info, tc);
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
- char *name;
int i;
for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
@@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
continue;
personality = qed_dcbx_app_update[i].personality;
- name = qed_dcbx_app_update[i].name;
qed_dcbx_set_params(p_data, p_info, enable,
prio, tc, type, personality);
@@ -869,7 +867,7 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
return rc;
}
-void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
{
struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
void *cookie = hwfn->cdev->ops_cookie;
@@ -991,6 +989,24 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
}
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri)
+{
+ struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+ if (pri >= QED_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid priority %d\n", pri);
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ if (!dcbx_info->operational.valid) {
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Dcbx parameters not available\n");
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ return dcbx_info->operational.params.ets_pri_tc_tbl[pri];
+}
+
#ifdef CONFIG_DCB
static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
struct qed_dcbx_get *p_get,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 5feb90e049e0..a4d688c04e18 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -123,4 +123,7 @@ void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+#define QED_DCBX_DEFAULT_TC 0
+
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 4340c4c90bcb..1aa9fc1c5890 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7838,8 +7838,8 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
-int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
- enum qed_nvm_images image_id, u32 *length)
+static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
{
struct qed_nvm_image_att image_att;
int rc;
@@ -7854,8 +7854,9 @@ int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
- u32 *num_dumped_bytes, enum qed_nvm_images image_id)
+static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes,
+ enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e5249b4741d0..016ca8a7ec8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -215,6 +215,8 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
break;
case QED_PCI_ETH_ROCE:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (IS_QED_MULTI_TC_ROCE(p_hwfn))
+ flags |= PQ_FLAGS_MTC;
break;
case QED_PCI_ETH_IWARP:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
@@ -230,20 +232,30 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
}
/* Getters for resource amounts necessary for qm initialization */
-u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
return p_hwfn->hw_info.num_hw_tc;
}
-u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
return IS_QED_SRIOV(p_hwfn->cdev) ?
p_hwfn->cdev->p_iov_info->total_vfs : 0;
}
+static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+ if (!(PQ_FLAGS_MTC & pq_flags))
+ return 1;
+
+ return qed_init_qm_get_num_tcs(p_hwfn);
+}
+
#define NUM_DEFAULT_RLS 1
-u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
@@ -261,7 +273,7 @@ u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
return num_pf_rls;
}
-u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -273,7 +285,7 @@ u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
}
/* calc amount of PQs according to the requested flags */
-u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -282,8 +294,11 @@ u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
(!!(PQ_FLAGS_MCOS & pq_flags)) *
qed_init_qm_get_num_tcs(p_hwfn) +
(!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
- (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
- (!!(PQ_FLAGS_LLT & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
(!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
}
@@ -394,7 +409,25 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
#define PQ_INIT_DEFAULT_TC 0
-#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
+{
+ p_info->offload_tc = tc;
+ p_info->offload_tc_set = true;
+}
+
+static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.offload_tc_set;
+}
+
+static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
+{
+ if (qed_is_offload_tc_set(p_hwfn))
+ return p_hwfn->hw_info.offload_tc;
+
+ return PQ_INIT_DEFAULT_TC;
+}
static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info,
@@ -456,9 +489,9 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
case PQ_FLAGS_ACK:
return &qm_info->pure_ack_pq;
case PQ_FLAGS_OFLD:
- return &qm_info->offload_pq;
+ return &qm_info->first_ofld_pq;
case PQ_FLAGS_LLT:
- return &qm_info->low_latency_pq;
+ return &qm_info->first_llt_pq;
case PQ_FLAGS_VFS:
return &qm_info->first_vf_pq;
default:
@@ -507,14 +540,26 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}
-u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u16 first_ofld_pq, pq_offset;
+
+ first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
+
+ return first_ofld_pq + pq_offset;
+}
+
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
- u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+ u16 first_llt_pq, pq_offset;
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+ first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
- return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+ return first_llt_pq + pq_offset;
}
/* Functions for creating specific types of pqs */
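
A standalone sketch of the index math in qed_get_cm_pq_idx_ofld_mtc() and qed_get_cm_pq_idx_llt_mtc() above: a TC outside the multi-TC range is clamped to the default TC's PQ. The base index and TC count are hypothetical:

#include <stdio.h>

/* A TC beyond the multi-TC range falls back to the default TC's PQ
 * (PQ_INIT_DEFAULT_TC, i.e. 0).
 */
static unsigned int pq_offset(unsigned int tc, unsigned int num_mtc_tcs)
{
        return tc < num_mtc_tcs ? tc : 0;
}

int main(void)
{
        unsigned int first_ofld_pq = 10, num_mtc_tcs = 4; /* hypothetical */
        unsigned int tc;

        for (tc = 0; tc < 6; tc++)
                printf("tc %u -> pq %u\n", tc,
                       first_ofld_pq + pq_offset(tc, num_mtc_tcs));
        /* tc 0..3 -> pq 10..13; tc 4 and 5 clamp back to pq 10 */
        return 0;
}
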
@@ -548,7 +593,22 @@ static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
+{
+ u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc;
+
+ /* override pq's TC if offload TC is set */
+ for (tc = 0; tc < num_tcs; tc++)
+ qed_init_qm_pq(p_hwfn, qm_info,
+ qed_is_offload_tc_set(p_hwfn) ?
+ p_hwfn->hw_info.offload_tc : tc,
+ PQ_INIT_SHARE_VPORT);
}
static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
@@ -559,7 +619,7 @@ static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_mtc_pqs(p_hwfn);
}
static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
@@ -570,7 +630,7 @@ static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_mtc_pqs(p_hwfn);
}
static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
@@ -611,7 +671,8 @@ static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_PF_RL);
}
static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
@@ -652,12 +713,19 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
- DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
- return -EINVAL;
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ p_hwfn->hw_info.multi_tc_roce_en = 0;
+ DP_NOTICE(p_hwfn,
+ "multi-tc roce was disabled to reduce requested amount of pqs\n");
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
}
- return 0;
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
}
static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
@@ -671,11 +739,13 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
/* top level params */
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
qm_info->start_pq,
qm_info->start_vport,
qm_info->pure_lb_pq,
- qm_info->offload_pq, qm_info->pure_ack_pq);
+ qm_info->first_ofld_pq,
+ qm_info->first_llt_pq,
+ qm_info->pure_ack_pq);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
"ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
@@ -1719,14 +1789,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_params->p_tunn,
@@ -2908,6 +2978,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ p_hwfn->hw_info.multi_tc_roce_en = 1;
+
p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
p_hwfn->hw_info.num_active_tc = 1;
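
A pared-down model of the offload-TC resolution added above (qed_hw_info_set_offload_tc() and qed_get_offload_tc()); the struct is a stand-in, not the driver's qed_hw_info:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant qed_hw_info fields. */
struct hw_info_sketch {
        unsigned char offload_tc;
        bool offload_tc_set;
};

/* An explicitly configured offload TC (e.g. from UFP) wins; otherwise
 * the default TC 0 is used, as in qed_get_offload_tc().
 */
static int get_offload_tc(const struct hw_info_sketch *info)
{
        return info->offload_tc_set ? info->offload_tc : 0;
}

int main(void)
{
        struct hw_info_sketch unset = { 0 };
        struct hw_info_sketch set = { .offload_tc = 3, .offload_tc_set = true };

        printf("%d %d\n", get_offload_tc(&unset), get_offload_tc(&set)); /* 0 3 */
        return 0;
}
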
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index bee10c1781fb..8faceb691657 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12444,6 +12444,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+
#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
@@ -12543,6 +12545,15 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
/* Resource Allocation params - Driver version support */
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12596,6 +12607,9 @@ struct public_drv_mb {
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
@@ -12687,6 +12701,8 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
+#define MAX_I2C_TRANSACTION_SIZE 16
+
/* OCBB definitions */
enum tlvs {
/* Category 1: Device Properties */
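
The transceiver-read mailbox parameter is a single u32 packed from the four fields defined above. A standalone sketch with shortened names for the DRV_MB_PARAM_TRANSCEIVER_* defines and hypothetical field values:

#include <stdio.h>

/* Shortened stand-ins for the DRV_MB_PARAM_TRANSCEIVER_* defines. */
#define PORT_OFFSET 0
#define PORT_MASK   0x00000003
#define SIZE_OFFSET 2
#define SIZE_MASK   0x000000FC
#define I2C_OFFSET  8
#define I2C_MASK    0x0000FF00
#define ADDR_OFFSET 16
#define ADDR_MASK   0xFFFF0000

int main(void)
{
        /* Hypothetical request: port 1, 16 bytes, I2C addr 0xA0, offset 0x100 */
        unsigned int param = ((1u << PORT_OFFSET) & PORT_MASK) |
                             ((16u << SIZE_OFFSET) & SIZE_MASK) |
                             ((0xA0u << I2C_OFFSET) & I2C_MASK) |
                             ((0x100u << ADDR_OFFSET) & ADDR_MASK);

        printf("param = 0x%08x\n", param); /* 0x0100a041 */
        return 0;
}
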
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d845badf9b90..d6430dfebd83 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1225,19 +1225,6 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
0);
}
-void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 rfs_cm_hdr_event_id;
-
- /* Set RFS event ID to be awakened i Tstorm By Prs */
- rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
-}
-
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 pf_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index c0d4a54a5edb..1135387bd99d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -873,8 +873,8 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
{
qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 90a2b53096e2..17f3dfa2cc94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -377,7 +377,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
}
}
-const char *iwarp_state_names[] = {
+static const char *iwarp_state_names[] = {
"IDLE",
"RTS",
"TERMINATE",
@@ -942,7 +942,7 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
-void
+static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct mpa_v2_hdr *mpa_v2_params;
@@ -967,7 +967,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
mpa_data_size;
}
-void
+static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct qed_iwarp_cm_event_params params;
@@ -2500,7 +2500,7 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
/* The only slowpath for iwarp ll2 is unalign flush. When this completion
* is received, need to reset the FPDU.
*/
-void
+static void
qed_iwarp_ll2_slowpath(void *cxt,
u8 connection_handle,
u32 opaque_data_0, u32 opaque_data_1)
@@ -2803,8 +2803,9 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
-void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, u8 fw_return_code)
+static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ u8 fw_return_code)
{
struct qed_iwarp_cm_event_params params;
@@ -2824,8 +2825,9 @@ void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
ep->event_cb(ep->cb_context, &params);
}
-void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, int fw_ret_code)
+static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ int fw_ret_code)
{
struct qed_iwarp_cm_event_params params;
bool event_cb = false;
@@ -2954,7 +2956,7 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
}
}
-void
+static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
struct qed_iwarp_ep *ep, u8 fw_return_code)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 5ede6408649d..82a1bd1f8a8c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2188,16 +2188,17 @@ out:
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
int i;
memset(info, 0, sizeof(*info));
- info->num_tc = 1;
-
if (IS_PF(cdev)) {
int max_vf_vlan_filters = 0;
int max_vf_mac_filters = 0;
+ info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
u16 num_queues = 0;
@@ -2248,6 +2249,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
} else {
u16 total_cids = 0;
+ info->num_tc = 1;
+
/* Determine queues & XDP support */
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -2554,7 +2557,7 @@ static int qed_start_txq(struct qed_dev *cdev,
rc = qed_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- p_params, 0,
+ p_params, p_params->tc,
pbl_addr, pbl_size, ret_params);
if (rc) {
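
The hunk above forwards the queue's traffic class to the firmware instead of a hard-coded 0. A hypothetical, pared-down caller-side sketch (the real structure is qed's queue-start params; the qede side fills .tc from txq->cos later in this series):

/* Hypothetical, pared-down types; not the driver's definitions. */
struct queue_start_params_sketch {
        unsigned char tc; /* traffic class (CoS) of this Tx queue */
        /* p_sb, sb_idx, queue_id, ... elided */
};

static inline void fill_txq_start_params(struct queue_start_params_sketch *p,
                                         unsigned char txq_cos)
{
        p->tc = txq_cos; /* previously an implicit 0 for every Tx queue */
}
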
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 012973d75ad0..14ac9cab2653 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -158,7 +158,8 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+static void qed_ll2b_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_buffer *buffer = data->cookie;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 758a9a5127fa..2094d86a7a08 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -948,13 +948,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->eth_pf_params.num_arfs_filters = 0;
/* In case we might support RDMA, don't allow qede to be greedy
- * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+ * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+ * per hwfn.
*/
if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
u16 *num_cons;
num_cons = &params->eth_pf_params.num_cons;
- *num_cons = min_t(u16, *num_cons, 192);
+ *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
}
for (i = 0; i < cdev->num_hwfns; i++) {
@@ -2102,6 +2103,28 @@ out:
return status;
}
+static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
+ u8 dev_addr, u32 offset, u32 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
+ offset, len, buf);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -2144,6 +2167,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
+ .read_module_eeprom = &qed_read_module_eeprom,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cdd645024a32..d89a0e22f6e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -570,12 +570,13 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
+static int
+qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -1551,7 +1552,8 @@ qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
- p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
+ p_hwfn->ufp_info.tc);
qed_qm_reconf(p_hwfn, p_ptt);
} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
@@ -2473,6 +2475,55 @@ out:
return rc;
}
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
+{
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
+ u32 resp, param;
+ int rc;
+
+ nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
+ nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
+
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return -ENODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return -EINVAL;
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ return 0;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
@@ -2959,7 +3010,7 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int
+static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_lock_params *p_params)
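
A minimal userspace model of the chunking loop in qed_mcp_phy_sfp_read() above. Note that the real loop advances by the byte count the MFW actually returned (buf_size); this sketch assumes each transaction returns the full requested chunk:

#include <stdio.h>

#define MAX_I2C_TRANSACTION_SIZE 16

int main(void)
{
        unsigned int offset = 0, bytes_left = 40; /* hypothetical request */

        while (bytes_left > 0) {
                unsigned int chunk = bytes_left < MAX_I2C_TRANSACTION_SIZE ?
                                     bytes_left : MAX_I2C_TRANSACTION_SIZE;

                printf("transaction: %u bytes at offset %u\n", chunk, offset);
                offset += chunk;
                bytes_left -= chunk;
        }
        /* 16 + 16 + 8: three mailbox round-trips for one 40-byte read */
        return 0;
}
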
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 632a838f1fe3..047976d5c6e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -840,6 +840,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
/**
+ * @brief Read from the SFP module EEPROM
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset within the SFP EEPROM
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
+
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 101d677114f2..be941cfaa2d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -134,7 +134,7 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap)
return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
@@ -706,7 +706,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index b5ce1581645f..7d7a64c55ff1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -44,8 +44,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
@@ -157,7 +159,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
@@ -231,16 +233,33 @@ static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
+static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u8 pri, tc = 0;
+
+ if (qp->vlan_id) {
+ pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u tc: %u (vlan priority %s)\n",
+ qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
+
+ return tc;
+}
+
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
struct qed_sp_init_data init_data;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 regular_latency_queue;
enum protocol_type proto;
int rc;
+ u8 tc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -324,12 +343,17 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
- regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
- cpu_to_le16(regular_latency_queue);
+ cpu_to_le16(low_latency_queue);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
@@ -345,11 +369,6 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "rc = %d regular physical queue = 0x%x\n", rc,
- regular_latency_queue);
-
if (rc)
goto err;
@@ -375,12 +394,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
struct qed_sp_init_data init_data;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 regular_latency_queue;
enum protocol_type proto;
int rc;
+ u8 tc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -453,12 +473,17 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->cq_cid =
cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
- regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
- cpu_to_le16(regular_latency_queue);
+ cpu_to_le16(low_latency_queue);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
@@ -471,9 +496,6 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
-
if (rc)
goto err;
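
A standalone sketch of the TC derivation in qed_roce_get_qp_tc() above: the PCP bits of the QP's VLAN tag select a priority, which DCBx then maps to a TC. The DCBx lookup is mocked as an identity map here; VLAN_PRIO_MASK and VLAN_PRIO_SHIFT match linux/if_vlan.h:

#include <stdio.h>

#define VLAN_PRIO_MASK  0xe000 /* as in linux/if_vlan.h */
#define VLAN_PRIO_SHIFT 13

/* Mock of qed_dcbx_get_priority_tc(); assumed to be an identity map. */
static unsigned int prio_to_tc(unsigned int pri)
{
        return pri;
}

int main(void)
{
        unsigned short vlan_id = 0xa064; /* PCP 5, VID 0x064 */
        unsigned int pri = (vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

        printf("pri %u -> tc %u\n", pri, prio_to_tc(pri)); /* pri 5 -> tc 5 */
        return 0;
}
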
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 26e918d7f2f9..9b08a9d9e151 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -672,8 +672,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
-bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
- int vfid, bool b_fail_malicious)
+static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+ int vfid, bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -687,7 +687,7 @@ bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
return true;
}
-bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}
@@ -3979,7 +3979,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
+static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
int i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index be6ddde1a104..3d4269659820 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -169,7 +169,7 @@ static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
p_qid_tlv->qid = p_cid->qid_usage_idx;
}
-int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d7ed0d3dbf71..6a4d266fb8e2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -52,6 +52,9 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 33
#define QEDE_REVISION_VERSION 0
@@ -386,6 +389,15 @@ struct qede_tx_queue {
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
+ ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
+ (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
+ (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+ [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
/* Regular Tx requires skb + metadata for release purpose,
* while XDP requires the pages and the mapped address.
@@ -399,6 +411,8 @@ struct qede_tx_queue {
/* Slowpath; Should be kept in end [unless missing padding] */
void *handle;
+ u16 cos;
+ u16 ndev_txq_id;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -458,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs);
@@ -524,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
@@ -541,5 +557,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+ for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
#endif /* _QEDE_H_ */
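
A worked example of the queue-id macros above, with hypothetical sizes (eight TSS queues, fp_num_rx = 0): txq {cos = 2, index = 3} maps to flat netdev queue id 19, and the inverse recovers the fastpath and CoS on the xmit path:

#include <stdio.h>

#define TSS_COUNT 8 /* hypothetical QEDE_TSS_COUNT() */
#define FP_NUM_RX 0 /* hypothetical edev->fp_num_rx */

int main(void)
{
        unsigned int cos = 2, index = 3;

        /* QEDE_TXQ_TO_NDEV_TXQ_ID: flat queue id seen by the stack */
        unsigned int ndev_id = TSS_COUNT * cos + index; /* 19 */

        /* ...and the inverse used on the xmit path: */
        unsigned int fp_id = FP_NUM_RX + ndev_id % TSS_COUNT; /* 3 */
        unsigned int txq_cos = ndev_id / TSS_COUNT;           /* 2 */

        printf("ndev txq %u -> fp %u, cos %u\n", ndev_id, fp_id, txq_cos);
        return 0;
}
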
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f4a0f8ff8261..19652cd27ca7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -222,7 +222,7 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
QEDE_TXQ_XDP_TO_IDX(edev, txq),
qede_tqstats_arr[i].string);
else
- sprintf(*buf, "%d: %s", txq->index,
+ sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
qede_tqstats_arr[i].string);
*buf += ETH_GSTRING_LEN;
}
@@ -262,8 +262,13 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_strings_stats_txq(edev, fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_strings_stats_txq(edev,
+ &fp->txq[cos], &buf);
+ }
}
/* Account for non-queue statistics */
@@ -338,8 +343,12 @@ static void qede_get_ethtool_stats(struct net_device *dev,
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_ethtool_stats_txq(fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+ }
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
@@ -366,7 +375,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
num_stats--;
/* Account for the Regular Tx statistics */
- num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+ edev->dev_info.num_tc;
/* Account for the Regular Rx statistics */
num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
@@ -741,9 +751,17 @@ static int qede_get_coalesce(struct net_device *dev,
}
for_each_queue(i) {
+ struct qede_tx_queue *txq;
+
fp = &edev->fp_array[i];
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
if (fp->type & QEDE_FASTPATH_TX) {
- tx_handle = fp->txq->handle;
+ txq = QEDE_FP_TC0_TXQ(fp);
+ tx_handle = txq->handle;
break;
}
}
@@ -801,9 +819,17 @@ static int qede_set_coalesce(struct net_device *dev,
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq;
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
+ txq = QEDE_FP_TC0_TXQ(fp);
+
rc = edev->ops->common->set_coalesce(edev->cdev,
0, txc,
- fp->txq->handle);
+ txq->handle);
if (rc) {
DP_INFO(edev,
"Set TX coalesce error, rc = %d\n", rc);
@@ -1259,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
rc = qede_add_cls_rule(edev, info);
break;
case ETHTOOL_SRXCLSRLDEL:
- rc = qede_del_cls_rule(edev, info);
+ rc = qede_delete_flow_filter(edev, info->fs.location);
break;
default:
DP_INFO(edev, "Command parameters not supported\n");
@@ -1385,8 +1411,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
u16 val;
for_each_queue(i) {
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txq;
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ txq = QEDE_FP_TC0_TXQ(fp);
break;
}
}
@@ -1780,6 +1808,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
+static int qede_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u8 buf[4];
+ int rc;
+
+ /* Read first 4 bytes to find the sfp type */
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0, 0, 4);
+ if (rc) {
+ DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
+ return rc;
+ }
+
+ switch (buf[0]) {
+ case 0x3: /* SFP, SFP+, SFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case 0xc: /* QSFP */
+ case 0xd: /* QSFP+ */
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case 0x11: /* QSFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qede_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 start_addr = ee->offset, size = 0;
+ u8 *buf = data;
+ int rc = 0;
+
+ /* Read A0 section */
+ if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
+ size = ETH_MODULE_SFF_8079_LEN - ee->offset;
+ else
+ size = ee->len;
+
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0,
+ start_addr, size);
+ if (rc) {
+ DP_ERR(edev, "Failed reading A0 section %d\n", rc);
+ return rc;
+ }
+
+ buf += size;
+ start_addr += size;
+ }
+
+ /* Read A2 section */
+ if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
+ start_addr < ETH_MODULE_SFF_8472_LEN) {
+ size = ee->len - size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
+ size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A2,
+ start_addr, size);
+ if (rc) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Failed reading A2 section %d\n", rc);
+ return 0;
+ }
+ }
+
+ return rc;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -1813,6 +1927,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
+ .get_module_info = qede_get_module_info,
+ .get_module_eeprom = qede_get_module_eeprom,
.get_eee = qede_get_eee,
.set_eee = qede_set_eee,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b823bfe2ea4d..9673d19308e6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -83,7 +83,7 @@ struct qede_arfs_fltr_node {
struct qede_arfs_tuple tuple;
u32 flow_id;
- u16 sw_id;
+ u64 sw_id;
u16 rxq_id;
u16 next_rxq_id;
u8 vfid;
@@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->tuple.stringify(&n->tuple, tuple_buffer);
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
- "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+ "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
add_fltr ? "Adding" : "Deleting",
n->sw_id, tuple_buffer, n->vfid, rxq_id);
}
@@ -152,7 +152,10 @@ static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
kfree(fltr->data);
- clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+ if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
kfree(fltr);
}
@@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
if (fw_rc) {
DP_NOTICE(edev,
- "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
fw_rc, fltr->flow_id, fltr->sw_id,
ntohs(fltr->tuple.src_port),
ntohs(fltr->tuple.dst_port), fltr->rxq_id);
@@ -1116,7 +1119,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!edev->xdp_prog;
xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
return 0;
default:
@@ -1349,7 +1351,7 @@ out:
}
static struct qede_arfs_fltr_node *
-qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
struct qede_arfs_fltr_node *fltr;
@@ -1600,6 +1602,69 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
return 0;
}
+static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t)
+{
+ /* The input must be exactly one of: a full 4-tuple,
+ * an L4 dst port only, a src IP only, or a dst IP only.
+ */
+ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !t->src_ipv4 && !t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !t->dst_ipv4 && t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ t->dst_ipv4 && !t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv4_cmp;
+ t->build_hdr = qede_flow_build_ipv4_hdr;
+ t->stringify = qede_flow_stringify_ipv4_hdr;
+
+ return 0;
+}
+
+static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr)
+{
+ /* The input must be exactly one of: a full 4-tuple,
+ * an L4 dst port only, a src IP only, or a dst IP only.
+ */
+ if (t->src_port && t->dst_port &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv6_cmp;
+ t->build_hdr = qede_flow_build_ipv6_hdr;
+
+ return 0;
+}
+
static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
struct qede_arfs_tuple *t,
struct ethtool_rx_flow_spec *fs)
@@ -1639,27 +1704,7 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
t->src_port = fs->h_u.tcp_ip4_spec.psrc;
t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
- /* We must either have a valid 4-tuple or only dst port
- * or only src ip as an input
- */
- if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- } else if (!t->src_port && t->dst_port &&
- !t->src_ipv4 && !t->dst_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
- } else if (!t->src_port && !t->dst_port &&
- !t->dst_ipv4 && t->src_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
- } else {
- DP_INFO(edev, "Invalid N-tuple\n");
- return -EOPNOTSUPP;
- }
-
- t->ip_comp = qede_flow_spec_ipv4_cmp;
- t->build_hdr = qede_flow_build_ipv4_hdr;
- t->stringify = qede_flow_stringify_ipv4_hdr;
-
- return 0;
+ return qede_set_v4_tuple_to_profile(edev, t);
}
static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
@@ -1691,10 +1736,8 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
struct ethtool_rx_flow_spec *fs)
{
struct in6_addr zero_addr;
- void *p;
- p = &zero_addr;
- memset(p, 0, sizeof(zero_addr));
+ memset(&zero_addr, 0, sizeof(zero_addr));
if ((fs->h_u.tcp_ip6_spec.psrc &
fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
@@ -1721,30 +1764,7 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
t->src_port = fs->h_u.tcp_ip6_spec.psrc;
t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
- /* We must make sure we have a valid 4-tuple or only dest port
- * or only src ip as an input
- */
- if (t->src_port && t->dst_port &&
- memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
- memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- } else if (!t->src_port && t->dst_port &&
- !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
- !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
- } else if (!t->src_port && !t->dst_port &&
- !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
- memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
- } else {
- DP_INFO(edev, "Invalid N-tuple\n");
- return -EOPNOTSUPP;
- }
-
- t->ip_comp = qede_flow_spec_ipv6_cmp;
- t->build_hdr = qede_flow_build_ipv6_hdr;
-
- return 0;
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
@@ -1942,9 +1962,8 @@ unlock:
return rc;
}
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
- struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *fltr = NULL;
int rc = -EPERM;
@@ -1953,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
goto unlock;
fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
- fsp->location);
+ cookie);
if (!fltr)
goto unlock;
@@ -1983,3 +2002,293 @@ unlock:
__qede_unlock(edev);
return count;
}
+
+static int qede_parse_actions(struct qede_dev *edev,
+ struct tcf_exts *exts)
+{
+ int rc = -EINVAL, num_act = 0;
+ const struct tc_action *a;
+ bool is_drop = false;
+ LIST_HEAD(actions);
+
+ if (!tcf_exts_has_actions(exts)) {
+ DP_NOTICE(edev, "No tc actions received\n");
+ return rc;
+ }
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ num_act++;
+
+ if (is_tcf_gact_shot(a))
+ is_drop = true;
+ }
+
+ if (num_act == 1 && is_drop)
+ return 0;
+
+ return rc;
+}
+
+static int
+qede_tc_parse_ports(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+
+ if ((key->src && mask->src != U16_MAX) ||
+ (key->dst && mask->dst != U16_MAX)) {
+ DP_NOTICE(edev, "Do not support ports masks\n");
+ return -EINVAL;
+ }
+
+ t->src_port = key->src;
+ t->dst_port = key->dst;
+ }
+
+ return 0;
+}
+
+static int
+qede_tc_parse_v6_common(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ struct in6_addr zero_addr, addr;
+
+ memset(&zero_addr, 0, sizeof(zero_addr));
+ memset(&addr, 0xff, sizeof(addr));
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_dissector_key_ipv6_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
+ memcmp(&mask->src, &addr, sizeof(addr))) ||
+ (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
+ memcmp(&mask->dst, &addr, sizeof(addr)))) {
+ DP_NOTICE(edev,
+ "Do not support IPv6 address prefix/mask\n");
+ return -EINVAL;
+ }
+
+ memcpy(&t->src_ipv6, &key->src, sizeof(addr));
+ memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+ }
+
+ if (qede_tc_parse_ports(edev, f, t))
+ return -EINVAL;
+
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_tc_parse_v4_common(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_dissector_key_ipv4_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ if ((key->src && mask->src != U32_MAX) ||
+ (key->dst && mask->dst != U32_MAX)) {
+ DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ return -EINVAL;
+ }
+
+ t->src_ipv4 = key->src;
+ t->dst_ipv4 = key->dst;
+ }
+
+ if (qede_tc_parse_ports(edev, f, t))
+ return -EINVAL;
+
+ return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_tc_parse_tcp_v6(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_tcp_v4(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v6(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v4(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ int rc = -EINVAL;
+ u8 ip_proto = 0;
+
+ memset(tuple, 0, sizeof(*tuple));
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+ f->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (proto != htons(ETH_P_IP) &&
+ proto != htons(ETH_P_IPV6)) {
+ DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ ip_proto = key->ip_proto;
+ }
+
+ if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+ rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+ else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+ rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+ rc = qede_tc_parse_udp_v4(edev, f, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+ rc = qede_tc_parse_udp_v6(edev, f, tuple);
+ else
+ DP_NOTICE(edev, "Invalid tc protocol request\n");
+
+ return rc;
+}
+
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f)
+{
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc = -EINVAL;
+ struct qede_arfs_tuple t;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ /* parse flower attribute and prepare filter */
+ if (qede_parse_flower_attr(edev, proto, f, &t))
+ goto unlock;
+
+ /* Validate profile mode and number of filters */
+ if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+ edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+ DP_NOTICE(edev,
+ "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+ t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ goto unlock;
+ }
+
+ /* parse tc actions and get the vf_id */
+ if (qede_parse_actions(edev, f->exts))
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EEXIST;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ n->buf_len = min_hlen;
+ n->b_is_drop = true;
+ n->sw_id = f->cookie;
+
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
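
The profile selection in qede_set_v4_tuple_to_profile() accepts exactly four n-tuple shapes; everything else is rejected with -EOPNOTSUPP. A standalone sketch of that decision table (mode strings mirror the QED_FILTER_CONFIG_MODE_* values used above):

#include <stdbool.h>
#include <stdio.h>

static const char *v4_mode(bool sport, bool dport, bool sip, bool dip)
{
        if (sport && dport && sip && dip)
                return "5_TUPLE";
        if (!sport && dport && !sip && !dip)
                return "L4_PORT";
        if (!sport && !dport && sip && !dip)
                return "IP_SRC";
        if (!sport && !dport && !sip && dip)
                return "IP_DEST";
        return "invalid (-EOPNOTSUPP)";
}

int main(void)
{
        printf("%s\n", v4_mode(true, true, true, true));    /* 5_TUPLE */
        printf("%s\n", v4_mode(false, true, false, false)); /* L4_PORT */
        printf("%s\n", v4_mode(false, false, false, true)); /* IP_DEST */
        printf("%s\n", v4_mode(true, false, false, false)); /* invalid */
        return 0;
}

An illustrative rule that would offload in IP_DEST mode: tc filter add dev <if> ingress protocol ip flower ip_proto tcp dst_ip 10.0.0.1 action drop (a plain drop is the only action the parser accepts).
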
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 6c702399b801..1a78027de071 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -408,12 +408,12 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
- unsigned int pkts_compl = 0, bytes_compl = 0;
int rc;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
@@ -1119,8 +1119,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
default:
bpf_warn_invalid_xdp_action(act);
+ /* Fall through */
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
+ /* Fall through */
case XDP_DROP:
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
}
@@ -1363,9 +1365,14 @@ static bool qede_poll_is_more_work(struct qede_fastpath *fp)
if (qede_txq_has_work(fp->xdp_tx))
return true;
- if (likely(fp->type & QEDE_FASTPATH_TX))
- if (qede_txq_has_work(fp->txq))
- return true;
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ return true;
+ }
+ }
return false;
}
@@ -1380,8 +1387,14 @@ int qede_poll(struct napi_struct *napi, int budget)
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
- qede_tx_int(edev, fp->txq);
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ qede_tx_int(edev, &fp->txq[cos]);
+ }
+ }
if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
qede_xdp_tx_int(edev, fp->xdp_tx);
@@ -1442,8 +1455,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
+ txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6a796040a32c..46d0f2eaa0c0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -536,6 +536,97 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ int cos, count, offset;
+
+ if (num_tc > edev->dev_info.num_tc)
+ return -EINVAL;
+
+ netdev_reset_tc(ndev);
+ netdev_set_num_tc(ndev, num_tc);
+
+ for_each_cos_in_txq(edev, cos) {
+ count = QEDE_TSS_COUNT(edev);
+ offset = cos * QEDE_TSS_COUNT(edev);
+ netdev_set_tc_queue(ndev, cos, count, offset);
+ }
+
+ return 0;
+}
+
+static int
+qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
+ __be16 proto)
+{
+ switch (f->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return qede_add_tc_flower_fltr(edev, proto, f);
+ case TC_CLSFLOWER_DESTROY:
+ return qede_delete_flow_filter(edev, f->cookie);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct tc_cls_flower_offload *f;
+ struct qede_dev *edev = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ f = type_data;
+ return qede_set_flower(edev, f, f->common.protocol);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block(struct qede_dev *edev,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ qede_setup_tc_block_cb,
+ edev, edev, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct tc_mqprio_qopt *mqprio;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return qede_setup_tc_block(edev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ mqprio = type_data;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ return qede_setup_tc(dev, mqprio->num_tc);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
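+ /* Illustration only: the layout qede_setup_tc() above programs via
+ * netdev_set_tc_queue(), assuming eight real Tx queues and num_tc = 4.
+ * Compilable standalone as userspace C:
+ *
+ * #include <stdio.h>
+ *
+ * #define TSS_COUNT 8
+ *
+ * int main(void)
+ * {
+ * int cos;
+ *
+ * for (cos = 0; cos < 4; cos++)
+ * printf("tc %d: txqs %d..%d\n", cos,
+ * cos * TSS_COUNT, cos * TSS_COUNT + TSS_COUNT - 1);
+ * // tc 0: 0..7, tc 1: 8..15, tc 2: 16..23, tc 3: 24..31
+ * return 0;
+ * }
+ */
+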
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -568,6 +659,7 @@ static const struct net_device_ops qede_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
+ .ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
@@ -621,7 +713,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues, info->num_queues);
+ info->num_queues * info->num_tc,
+ info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
@@ -688,7 +781,7 @@ static void qede_init_ndev(struct qede_dev *edev)
/* user-changeble features */
hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
hw_features |= NETIF_F_NTUPLE;
@@ -830,7 +923,8 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ fp->txq = kcalloc(edev->dev_info.num_tc,
+ sizeof(*fp->txq), GFP_KERNEL);
if (!fp->txq)
goto err;
}
@@ -879,10 +973,15 @@ static void qede_sp_task(struct work_struct *work)
static void qede_update_pf_params(struct qed_dev *cdev)
{
struct qed_pf_params pf_params;
+ u16 num_cons;
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+ /* 1 rx + 1 xdp + max tx cos */
+ num_cons = QED_MIN_L2_CONS;
+
+ pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
/* Same for VFs - make sure they'll have sufficient connections
* to support XDP Tx queues.
@@ -1363,8 +1462,12 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
if (fp->type & QEDE_FASTPATH_XDP)
qede_free_mem_txq(edev, fp->xdp_tx);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_free_mem_txq(edev, fp->txq);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_free_mem_txq(edev, &fp->txq[cos]);
+ }
}
/* This function allocates all memory needed for a single fp (i.e. an entity
@@ -1391,9 +1494,13 @@ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_alloc_mem_txq(edev, fp->txq);
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
+ if (rc)
+ goto out;
+ }
}
out:
@@ -1466,10 +1573,23 @@ static void qede_init_fp(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq->index = txq_index++;
- if (edev->dev_info.is_legacy)
- fp->txq->is_legacy = 1;
- fp->txq->dev = &edev->pdev->dev;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_tx_queue *txq = &fp->txq[cos];
+ u16 ndev_tx_id;
+
+ txq->cos = cos;
+ txq->index = txq_index;
+ ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
+ txq->ndev_txq_id = ndev_tx_id;
+
+ if (edev->dev_info.is_legacy)
+ txq->is_legacy = 1;
+ txq->dev = &edev->pdev->dev;
+ }
+
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
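[Editor's note: QEDE_TXQ_TO_NDEV_TXQ_ID() is also defined outside this hunk. Since qede_set_real_num_queues() below publishes QEDE_TSS_COUNT() * num_tc queues to the stack, the (index, cos) to netdev-queue-id mapping is presumably class-major, along the lines of:]

	#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) \
		(QEDE_TSS_COUNT(edev) * (txq)->cos + (txq)->index)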
@@ -1483,7 +1603,9 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev,
+ QEDE_TSS_COUNT(edev) *
+ edev->dev_info.num_tc);
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
@@ -1685,9 +1807,13 @@ static int qede_stop_queues(struct qede_dev *edev)
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_drain_txq(edev, fp->txq, true);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_drain_txq(edev, &fp->txq[cos], true);
+ if (rc)
+ return rc;
+ }
}
if (fp->type & QEDE_FASTPATH_XDP) {
@@ -1703,9 +1829,13 @@ static int qede_stop_queues(struct qede_dev *edev)
/* Stop the Tx Queue(s) */
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_stop_txq(edev, fp->txq, i);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_stop_txq(edev, &fp->txq[cos], i);
+ if (rc)
+ return rc;
+ }
}
/* Stop the Rx Queue */
@@ -1758,6 +1888,7 @@ static int qede_start_txq(struct qede_dev *edev,
params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
+ params.tc = txq->cos;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
page_cnt, &ret_params);
@@ -1877,9 +2008,14 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
+ TX_PI(cos));
+ if (rc)
+ goto out;
+ }
}
}
@@ -1973,6 +2109,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
+ u8 num_tc;
int rc;
DP_INFO(edev, "Starting qede load\n");
@@ -2019,6 +2156,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+ num_tc = netdev_get_num_tc(edev->ndev);
+ num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
+ qede_setup_tc(edev->ndev, num_tc);
+
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -2143,7 +2284,7 @@ static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
if (netif_xmit_stopped(netdev_txq))
return true;
@@ -2208,9 +2349,11 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+ struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
+
+ if (txq->sw_tx_cons != txq->sw_tx_prod)
etlv->txqs_empty = false;
- if (qede_is_txq_full(edev, fp->txq))
+ if (qede_is_txq_full(edev, txq))
etlv->num_txqs_full++;
}
if (fp->type & QEDE_FASTPATH_RX) {
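[Editor's note: QEDE_FP_TC0_TXQ() keeps the TLV code on its old single-queue view by selecting the class-0 member of the per-fastpath txq array; presumably:]

	#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))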
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7f7deeaf1cf0..3b0adda7cc9c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -351,9 +351,9 @@ skip:
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
-
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
+ /* fall through */
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
@@ -377,6 +377,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
+ /* fall through */
case QLCNIC_BRDTYPE_P3P_10G_XFP:
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 7848cf04b29a..822aa393c370 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -276,13 +276,6 @@ static const unsigned crb_hub_agt[64] = {
0,
};
-static const u32 msi_tgt_status[8] = {
- ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
- ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
- ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
-};
-
/* PCI Windowing for DDR regions. */
#define QLCNIC_PCIE_SEM_TIMEOUT 10000
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0c744b9c6e0a..77e386ebff09 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vp->max_tx_bw = MAX_BW;
vp->min_tx_bw = MIN_BW;
vp->spoofchk = false;
- random_ether_addr(vp->mac);
+ eth_random_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
vp->mac, i);
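[Editor's note: random_ether_addr() is only a legacy alias in <linux/etherdevice.h>, so this conversion, like the rmnet one below, is purely mechanical:]

	/* include/linux/etherdevice.h */
	#define random_ether_addr(addr) eth_random_addr(addr)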
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index 4be65d6761b3..957c72985a06 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1176,6 +1176,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
ql_link_off(qdev);
+ /* Fall through */
case MB_CMD_SET_PORT_CFG:
/* Signal the resulting link up AEN
* that the frame routing and mac addr
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index b9a7548ec6a0..0afc3d335d56 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- random_ether_addr(rmnet_dev->dev_addr);
+ eth_random_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 7c69f4c8134d..e1cd934c2e4f 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -99,7 +99,7 @@ config R8169
depends on PCI
select FW_LOADER
select CRC32
- select MII
+ select PHYLIB
---help---
Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index eaedc11ed686..0d9c3831838f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -15,27 +15,22 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
-#include <linux/pci-aspm.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
-#define PFX MODULENAME ": "
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
@@ -57,19 +52,6 @@
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
-#ifdef RTL8169_DEBUG
-#define assert(expr) \
- if (!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__func__,__LINE__); \
- }
-#define dprintk(fmt, args...) \
- do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
-#else
-#define assert(expr) do {} while (0)
-#define dprintk(fmt, args...) do {} while (0)
-#endif /* RTL8169_DEBUG */
-
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
@@ -95,7 +77,6 @@ static const int multicast_filter_limit = 32;
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ)
-#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
@@ -160,136 +141,70 @@ enum mac_version {
RTL_GIGA_MAC_NONE = 0xff,
};
-enum rtl_tx_desc_version {
- RTL_TD_0 = 0,
- RTL_TD_1 = 1,
-};
-
#define JUMBO_1K ETH_DATA_LEN
#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
-#define _R(NAME,TD,FW,SZ) { \
- .name = NAME, \
- .txd_version = TD, \
- .fw_name = FW, \
- .jumbo_max = SZ, \
-}
-
static const struct {
const char *name;
- enum rtl_tx_desc_version txd_version;
const char *fw_name;
- u16 jumbo_max;
} rtl_chip_infos[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] =
- _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_02] =
- _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_03] =
- _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_04] =
- _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_05] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_06] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
+ [RTL_GIGA_MAC_VER_01] = {"RTL8169" },
+ [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
+ [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
+ [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
+ [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
+ [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
/* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_08] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_09] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_10] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_11] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_12] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_13] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_14] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_15] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_16] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_18] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_19] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_20] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_21] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_22] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_23] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_24] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_25] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_26] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_27] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_28] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_29] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_30] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_31] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_32] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_33] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_34] =
- _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K),
- [RTL_GIGA_MAC_VER_35] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_36] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_37] =
- _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_38] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_39] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_40] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_41] =
- _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_42] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K),
- [RTL_GIGA_MAC_VER_43] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K),
- [RTL_GIGA_MAC_VER_44] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_45] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_46] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_47] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_48] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K),
- [RTL_GIGA_MAC_VER_49] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_50] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_51] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
+ [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_09] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_13] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_14] = {"RTL8100e" },
+ [RTL_GIGA_MAC_VER_15] = {"RTL8100e" },
+ [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
+ [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
+ [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
+ [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
+ [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
+ [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
+ [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
+ [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
+ [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
+ [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
+ [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
+ [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
+ [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
+ [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
+ [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3},
+ [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2},
+ [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 },
+ [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
+ [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
+ [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
+ [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
+ [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
};
-#undef _R
enum cfg_version {
RTL_CFG_0 = 0x00,
@@ -399,12 +314,6 @@ enum rtl_registers {
FuncForceEvent = 0xfc,
};
-enum rtl8110_registers {
- TBICSR = 0x64,
- TBI_ANAR = 0x68,
- TBI_LPAR = 0x6a,
-};
-
enum rtl8168_8101_registers {
CSIDR = 0x64,
CSIAR = 0x68,
@@ -571,14 +480,6 @@ enum rtl_register_content {
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
ASPM_en = (1 << 0), /* ASPM enable */
- /* TBICSR p.28 */
- TBIReset = 0x80000000,
- TBILoopback = 0x40000000,
- TBINwEnable = 0x20000000,
- TBINwRestart = 0x10000000,
- TBILinkOk = 0x02000000,
- TBINwComplete = 0x01000000,
-
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
@@ -732,7 +633,6 @@ enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_PHY_PENDING,
RTL_FLAG_MAX
};
@@ -760,7 +660,6 @@ struct rtl8169_private {
dma_addr_t RxPhyAddr;
void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
- struct timer_list timer;
u16 cp_cmd;
u16 event_slow;
@@ -776,14 +675,7 @@ struct rtl8169_private {
void (*disable)(struct rtl8169_private *);
} jumbo_ops;
- int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_link_ksettings)(struct net_device *,
- struct ethtool_link_ksettings *);
- void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct rtl8169_private *tp);
- unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
- unsigned int (*link_ok)(struct rtl8169_private *tp);
- int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
struct {
@@ -792,7 +684,8 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- struct mii_if_info mii;
+ unsigned supports_gmii:1;
+ struct mii_bus *mii_bus;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -822,7 +715,6 @@ MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
-MODULE_VERSION(RTL8169_VERSION);
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
@@ -1143,21 +1035,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
-static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl_writephy(tp, location, val);
-}
-
-static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return rtl_readphy(tp, location);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1478,54 +1355,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
RTL_R8(tp, ChipCmd);
}
-static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBIReset;
-}
-
-static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
-{
- return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
-}
-
-static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBILinkOk;
-}
-
-static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R8(tp, PHYstatus) & LinkStatus;
-}
-
-static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
-{
- RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset);
-}
-
-static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
-{
- unsigned int val;
-
- val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
- rtl_writephy(tp, MII_BMCR, val & 0xffff);
-}
-
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
ERIAR_EXGMAC);
- } else if (RTL_R8(tp, PHYstatus) & _100bps) {
+ } else if (phydev->speed == SPEED_100) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1543,7 +1388,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1555,7 +1400,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
- if (RTL_R8(tp, PHYstatus) & _10bps) {
+ if (phydev->speed == SPEED_10) {
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
@@ -1567,25 +1412,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
-
- if (tp->link_ok(tp)) {
- rtl_link_chg_patch(tp);
- /* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(d);
- netif_carrier_on(dev);
- if (net_ratelimit())
- netif_info(tp, ifup, dev, "link up\n");
- } else {
- netif_carrier_off(dev);
- netif_info(tp, ifdown, dev, "link down\n");
- pm_runtime_idle(d);
- }
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -1626,21 +1452,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = tp_to_dev(tp);
-
- pm_runtime_get_noresume(d);
rtl_lock_work(tp);
-
wol->supported = WAKE_ANY;
- if (pm_runtime_active(d))
- wol->wolopts = __rtl8169_get_wol(tp);
- else
- wol->wolopts = tp->saved_wolopts;
-
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
-
- pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1716,18 +1532,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
pm_runtime_get_noresume(d);
rtl_lock_work(tp);
+ tp->saved_wolopts = wol->wolopts;
+
if (pm_runtime_active(d))
- __rtl8169_set_wol(tp, wol->wolopts);
- else
- tp->saved_wolopts = wol->wolopts;
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- device_set_wakeup_enable(d, wol->wolopts);
+ device_set_wakeup_enable(d, tp->saved_wolopts);
pm_runtime_put_noidle(d);
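[Editor's note: after this change tp->saved_wolopts always tracks the requested configuration, whether or not the device is runtime-active, and the runtime-PM, shutdown and remove paths further down read it instead of querying the hardware. The new invariant, in sketch form:]

	/* sketch of the invariant established here:
	 *   tp->saved_wolopts == (last accepted wol->wolopts) & WAKE_ANY
	 * and the hardware is (re)programmed from saved_wolopts whenever
	 * the device is runtime-active.
	 */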
@@ -1746,7 +1565,6 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl_fw *rtl_fw = tp->rtl_fw;
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (!IS_ERR_OR_NULL(rtl_fw))
@@ -1759,124 +1577,6 @@ static int rtl8169_get_regs_len(struct net_device *dev)
return R8169_REGS_SIZE;
}
-static int rtl8169_set_speed_tbi(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 ignored)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret = 0;
- u32 reg;
-
- reg = RTL_R32(tp, TBICSR);
- if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
- (duplex == DUPLEX_FULL)) {
- RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart));
- } else if (autoneg == AUTONEG_ENABLE)
- RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart);
- else {
- netif_warn(tp, link, dev,
- "incorrect speed setting refused in TBI mode\n");
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int rtl8169_set_speed_xmii(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 adv)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int giga_ctrl, bmcr;
- int rc = -EINVAL;
-
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (autoneg == AUTONEG_ENABLE) {
- int auto_nego;
-
- auto_nego = rtl_readphy(tp, MII_ADVERTISE);
- auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
-
- if (adv & ADVERTISED_10baseT_Half)
- auto_nego |= ADVERTISE_10HALF;
- if (adv & ADVERTISED_10baseT_Full)
- auto_nego |= ADVERTISE_10FULL;
- if (adv & ADVERTISED_100baseT_Half)
- auto_nego |= ADVERTISE_100HALF;
- if (adv & ADVERTISED_100baseT_Full)
- auto_nego |= ADVERTISE_100FULL;
-
- auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-
- giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
- giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
- /* The 8100e/8101e/8102e do Fast Ethernet only. */
- if (tp->mii.supports_gmii) {
- if (adv & ADVERTISED_1000baseT_Half)
- giga_ctrl |= ADVERTISE_1000HALF;
- if (adv & ADVERTISED_1000baseT_Full)
- giga_ctrl |= ADVERTISE_1000FULL;
- } else if (adv & (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full)) {
- netif_info(tp, link, dev,
- "PHY does not support 1000Mbps\n");
- goto out;
- }
-
- bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
-
- rtl_writephy(tp, MII_ADVERTISE, auto_nego);
- rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
- } else {
- if (speed == SPEED_10)
- bmcr = 0;
- else if (speed == SPEED_100)
- bmcr = BMCR_SPEED100;
- else
- goto out;
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
- }
-
- rtl_writephy(tp, MII_BMCR, bmcr);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
- rtl_writephy(tp, 0x17, 0x2138);
- rtl_writephy(tp, 0x0e, 0x0260);
- } else {
- rtl_writephy(tp, 0x17, 0x2108);
- rtl_writephy(tp, 0x0e, 0x0000);
- }
- }
-
- rc = 0;
-out:
- return rc;
-}
-
-static int rtl8169_set_speed(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 advertising)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
- if (ret < 0)
- goto out;
-
- if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
- (advertising & ADVERTISED_1000baseT_Full) &&
- !pci_is_pcie(tp->pci_dev)) {
- mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
- }
-out:
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1940,76 +1640,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 status;
- u32 supported, advertising;
-
- supported =
- SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->base.port = PORT_FIBRE;
-
- status = RTL_R32(tp, TBICSR);
- advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->base.autoneg = !!(status & TBINwEnable);
-
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL; /* Always set */
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- mii_ethtool_get_link_ksettings(&tp->mii, cmd);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
-
- rtl_lock_work(tp);
- rc = tp->get_link_ksettings(dev, cmd);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
-static int rtl8169_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
- u32 advertising;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising))
- return -EINVAL;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
- cmd->base.duplex, advertising);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2185,13 +1815,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int rtl8169_nway_reset(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return mii_nway_restart(&tp->mii);
-}
-
/*
* Interrupt coalescing
*
@@ -2264,7 +1887,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
const struct rtl_coalesce_info *ci;
int rc;
- rc = rtl8169_get_link_ksettings(dev, &ecmd);
+ rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
if (rc < 0)
return ERR_PTR(rc);
@@ -2422,9 +2045,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
- .nway_reset = rtl8169_nway_reset,
- .get_link_ksettings = rtl8169_get_link_ksettings,
- .set_link_ksettings = rtl8169_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -2537,15 +2160,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
"unknown MAC, using family default\n");
tp->mac_version = default_version;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_42 :
RTL_GIGA_MAC_VER_43;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_45 :
RTL_GIGA_MAC_VER_47;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_46 :
RTL_GIGA_MAC_VER_48;
}
@@ -2553,7 +2176,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
- dprintk("mac_version = 0x%02x\n", tp->mac_version);
+ netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
}
struct phy_reg {
@@ -4405,62 +4028,16 @@ static void rtl_hw_phy_config(struct net_device *dev)
}
}
-static void rtl_phy_work(struct rtl8169_private *tp)
-{
- struct timer_list *timer = &tp->timer;
- unsigned long timeout = RTL8169_PHY_TIMEOUT;
-
- assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
-
- if (tp->phy_reset_pending(tp)) {
- /*
- * A busy loop could burn quite a few cycles on nowadays CPU.
- * Let's delay the execution of the timer for a few ticks.
- */
- timeout = HZ/10;
- goto out_mod_timer;
- }
-
- if (tp->link_ok(tp))
- return;
-
- netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
-
- tp->phy_reset_enable(tp);
-
-out_mod_timer:
- mod_timer(timer, jiffies + timeout);
-}
-
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
if (!test_and_set_bit(flag, tp->wk.flags))
schedule_work(&tp->wk.work);
}
-static void rtl8169_phy_timer(struct timer_list *t)
-{
- struct rtl8169_private *tp = from_timer(tp, t, timer);
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
-}
-
-DECLARE_RTL_COND(rtl_phy_reset_cond)
-{
- return tp->phy_reset_pending(tp);
-}
-
-static void rtl8169_phy_reset(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- tp->phy_reset_enable(tp);
- rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
-}
-
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(tp, PHYstatus) & TBI_Enable);
+ (RTL_R8(tp, PHYstatus) & TBI_Enable);
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -4468,7 +4045,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
}
@@ -4478,23 +4056,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
- dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ netif_dbg(tp, drv, dev,
+ "Set PHY Reg 0x0bh = 0x00h\n");
rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
- rtl8169_phy_reset(dev, tp);
+ /* We may have called phy_speed_down before */
+ phy_speed_up(dev->phydev);
- rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0));
-
- if (rtl_tbi_enabled(tp))
- netif_info(tp, link, dev, "TBI auto-negotiating\n");
+ genphy_soft_reset(dev->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4539,34 +4112,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
-}
-
-static int rtl_xmii_ioctl(struct rtl8169_private *tp,
- struct mii_ioctl_data *data, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = 32; /* Internal PHY */
- return 0;
-
- case SIOCGMIIREG:
- data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
- return 0;
-
- case SIOCSMIIREG:
- rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
- return 0;
- }
- return -EOPNOTSUPP;
-}
+ if (!netif_running(dev))
+ return -ENODEV;
-static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
-{
- return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
@@ -4594,30 +4143,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
-static void rtl_speed_down(struct rtl8169_private *tp)
-{
- u32 adv;
- int lpa;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- lpa = rtl_readphy(tp, MII_LPA);
-
- if (lpa & (LPA_10HALF | LPA_10FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
- else if (lpa & (LPA_100HALF | LPA_100FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
- else
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0);
-
- rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- adv);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4639,56 +4164,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
return false;
- rtl_speed_down(tp);
+ phy_speed_down(tp->dev->phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
}
-static void r8168_phy_power_up(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0000);
- break;
- default:
- break;
- }
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-
- /* give MAC/PHY some time to resume */
- msleep(20);
-}
-
-static void r8168_phy_power_down(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
- break;
-
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0200);
- default:
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
- break;
- }
-}
-
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
@@ -4701,8 +4185,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (rtl_wol_pll_power_down(tp))
return;
- r8168_phy_power_down(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
@@ -4754,7 +4236,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
break;
}
- r8168_phy_power_up(tp);
+ phy_resume(tp->dev->phydev);
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_pll_power_down(struct rtl8169_private *tp)
@@ -5172,8 +4656,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
- dprintk("Set MAC Reg C+CR Offset 0xe0. "
- "Bit-3 and bit-14 MUST be 1\n");
+ netif_dbg(tp, drv, tp->dev,
+ "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
tp->cp_cmd |= (1 << 14);
}
@@ -5236,12 +4720,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
rtl_csi_write(tp, 0x070c, csi | val << 24);
}
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x17);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
+static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
rtl_csi_access_enable(tp, 0x27);
}
@@ -5290,6 +4769,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config3, data);
}
+static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+{
+ if (enable) {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+ } else {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ }
+}
+
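[Editor's note: rtl_hw_aspm_clkreq_enable() centralizes the Config2/Config5 ClkReqEn/ASPM_en toggling that several hw_start routines previously open-coded; the pattern around EPHY accesses in the hunks below becomes:]

	/* e_info is the per-chip ephy init table */
	rtl_hw_aspm_clkreq_enable(tp, false);	/* quiesce ASPM/ClkReq */
	rtl_ephy_init(tp, e_info, ARRAY_SIZE(e_info));
	rtl_hw_aspm_clkreq_enable(tp, true);	/* re-enable */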
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5337,7 +4827,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{ 0x07, 0, 0x2000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
@@ -5346,7 +4836,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5359,7 +4849,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5383,7 +4873,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{ 0x06, 0x0080, 0x0000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
@@ -5399,7 +4889,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{ 0x03, 0x0400, 0x0220 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
@@ -5413,14 +4903,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
@@ -5435,7 +4925,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5453,7 +4943,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{ 0x0c, 0x0100, 0x0020 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5482,7 +4972,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{ 0x0a, 0x0000, 0x0040 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
@@ -5507,7 +4997,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{ 0x19, 0x0000, 0x0224 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
@@ -5536,11 +5026,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5611,7 +5103,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5646,9 +5138,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5681,9 +5173,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
@@ -5700,8 +5192,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
@@ -5711,7 +5202,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5780,6 +5271,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
r8168_mac_ocp_write(tp, 0xc094, 0x0000);
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
@@ -5793,7 +5286,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5831,11 +5324,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
@@ -5847,14 +5341,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
@@ -5868,8 +5363,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp);
@@ -5889,6 +5383,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
data = r8168_mac_ocp_read(tp, 0xe860);
data |= 0x0080;
r8168_mac_ocp_write(tp, 0xe860, data);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168(struct rtl8169_private *tp)
@@ -6006,8 +5502,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
break;
default:
- printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
- tp->dev->name, tp->mac_version);
+ netif_err(tp, drv, tp->dev,
+ "unknown chipset (mac_version = %d)\n",
+ tp->mac_version);
break;
}
}
@@ -6026,7 +5523,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
};
u8 cfg1;
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, FIX_NAK_1);
@@ -6045,7 +5542,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -6100,7 +5597,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
{ 0x1e, 0, 0x4000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -6384,7 +5881,6 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_enable(&tp->napi);
rtl_hw_start(tp);
netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp);
}
static void rtl8169_tx_timeout(struct net_device *dev)
@@ -6958,20 +6454,15 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- int handled = 0;
- u16 status;
+ u16 status = rtl_get_events(tp);
- status = rtl_get_events(tp);
- if (status && status != 0xffff) {
- status &= RTL_EVENT_NAPI | tp->event_slow;
- if (status) {
- handled = 1;
+ if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+ return IRQ_NONE;
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
- }
- }
- return IRQ_RETVAL(handled);
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
+
+ return IRQ_HANDLED;
}
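[Editor's note: the rewrite also tightens the return-value discipline: an all-ones status (0xffff, typically a surprise-removed device) or no event of interest now yields IRQ_NONE, so the IRQ core can account spurious interrupts on shared lines. In sketch form:]

	/* contract of the rewritten handler:
	 *   not ours / device gone -> IRQ_NONE
	 *   relevant event         -> mask chip IRQs,
	 *                             napi_schedule_irqoff(), IRQ_HANDLED
	 */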
/*
@@ -7001,7 +6492,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp);
+ phy_mac_interrupt(dev->phydev);
rtl_irq_enable_all(tp);
}
@@ -7015,7 +6506,6 @@ static void rtl_task(struct work_struct *work)
/* XXX - keep rtl_slow_event_work() as first element. */
{ RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
- { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
};
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
@@ -7084,11 +6574,51 @@ static void rtl8169_rx_missed(struct net_device *dev)
RTL_W32(tp, RxMissed, 0);
}
+static void r8169_phylink_handler(struct net_device *ndev)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+ pm_request_resume(&tp->pci_dev->dev);
+ } else {
+ pm_runtime_idle(&tp->pci_dev->dev);
+ }
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static int r8169_phy_connect(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+ phy_interface_t phy_mode;
+ int ret;
+
+ phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII;
+
+ ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
+ phy_mode);
+ if (ret)
+ return ret;
+
+ if (!tp->supports_gmii)
+ phy_set_max_speed(phydev, SPEED_100);
+
+ /* Make sure we advertise everything, incl. pause */
+ phydev->advertising = phydev->supported;
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
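[Editor's note: this replaces the private link-state machinery deleted above. phylib now owns the link, and r8169_phylink_handler() runs on carrier changes with netif_carrier already updated by the PHY state machine. The lifecycle the rest of the patch adopts:]

	/* open:  r8169_phy_connect(tp); ... phy_start(dev->phydev);
	 * down:  phy_stop(dev->phydev);
	 * close: phy_disconnect(dev->phydev);
	 */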
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ phy_stop(dev->phydev);
napi_disable(&tp->napi);
netif_stop_queue(dev);
@@ -7129,6 +6659,8 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
+ phy_disconnect(dev->phydev);
+
pci_free_irq(pdev, 0, tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -7189,6 +6721,10 @@ static int rtl_open(struct net_device *dev)
if (retval < 0)
goto err_release_fw_2;
+ retval = r8169_phy_connect(tp);
+ if (retval)
+ goto err_free_irq;
+
rtl_lock_work(tp);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -7204,17 +6740,17 @@ static int rtl_open(struct net_device *dev)
if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ phy_start(dev->phydev);
netif_start_queue(dev);
rtl_unlock_work(tp);
- tp->saved_wolopts = 0;
pm_runtime_put_sync(&pdev->dev);
-
- rtl8169_check_link_status(dev, tp);
out:
return retval;
+err_free_irq:
+ pci_free_irq(pdev, 0, tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -7293,6 +6829,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
+ phy_stop(dev->phydev);
netif_device_detach(dev);
netif_stop_queue(dev);
@@ -7323,6 +6860,9 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
+ rtl8169_init_phy(dev, tp);
+
+ phy_start(tp->dev->phydev);
rtl_lock_work(tp);
napi_enable(&tp->napi);
@@ -7336,9 +6876,6 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_init_phy(dev, tp);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7352,13 +6889,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray) {
- rtl_pll_power_down(tp);
+ if (!tp->TxDescArray)
return 0;
- }
rtl_lock_work(tp);
- tp->saved_wolopts = __rtl8169_get_wol(tp);
__rtl8169_set_wol(tp, WAKE_ANY);
rtl_unlock_work(tp);
@@ -7383,11 +6917,8 @@ static int rtl8169_runtime_resume(struct device *device)
rtl_lock_work(tp);
__rtl8169_set_wol(tp, tp->saved_wolopts);
- tp->saved_wolopts = 0;
rtl_unlock_work(tp);
- rtl8169_init_phy(dev, tp);
-
__rtl8169_resume(dev);
return 0;
@@ -7455,7 +6986,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl8169_hw_reset(tp);
if (system_state == SYSTEM_POWER_OFF) {
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ if (tp->saved_wolopts) {
rtl_wol_suspend_quirk(tp);
rtl_wol_shutdown_quirk(tp);
}
@@ -7476,6 +7007,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
netif_napi_del(&tp->napi);
unregister_netdev(dev);
+ mdiobus_unregister(tp->mii_bus);
rtl_release_firmware(tp);
@@ -7544,6 +7076,11 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_40) {
+ /* This version was reported to have issues with resume
+ * from suspend when using MSI-X
+ */
+ flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
} else {
flags = PCI_IRQ_ALL_TYPES;
}
@@ -7561,6 +7098,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
+static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ return rtl_readphy(tp, phyreg);
+}
+
+static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
+ int phyreg, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ rtl_writephy(tp, phyreg, val);
+
+ return 0;
+}
+
+static int r8169_mdio_register(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ struct phy_device *phydev;
+ struct mii_bus *new_bus;
+ int ret;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus)
+ return -ENOMEM;
+
+ new_bus->name = "r8169";
+ new_bus->priv = tp;
+ new_bus->parent = &pdev->dev;
+ new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
+ PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+ new_bus->read = r8169_mdio_read_reg;
+ new_bus->write = r8169_mdio_write_reg;
+
+ ret = mdiobus_register(new_bus);
+ if (ret)
+ return ret;
+
+ phydev = mdiobus_get_phy(new_bus, 0);
+ if (!phydev) {
+ mdiobus_unregister(new_bus);
+ return -ENODEV;
+ }
+
+ /* PHY will be woken up in rtl_open() */
+ phy_suspend(phydev);
+
+ tp->mii_bus = new_bus;
+
+ return 0;
+}
+
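[Editor's note: the registered bus exposes a single PHY at address 0 and funnels phylib accesses through the driver's existing rtl_readphy()/rtl_writephy() accessors, so generic PHY code can now reach the internal PHY, e.g.:]

	/* goes through r8169_mdio_read_reg() above */
	int bmsr = mdiobus_read(tp->mii_bus, 0, MII_BMSR);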
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
u32 data;
@@ -7614,19 +7213,48 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
}
}
+/* RTL8102e and chip versions from RTL8168c onwards support csum_v2 */
+static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int rtl_jumbo_max(struct rtl8169_private *tp)
+{
+ /* Non-GBit versions don't support jumbo frames */
+ if (!tp->supports_gmii)
+ return JUMBO_1K;
+
+ switch (tp->mac_version) {
+ /* RTL8169 */
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ return JUMBO_7K;
+ /* RTL8168b */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ return JUMBO_4K;
+ /* RTL8168c */
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
+ return JUMBO_6K;
+ default:
+ return JUMBO_9K;
+ }
+}
+
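[Editor's note: these two helpers replace the txd_version and jumbo_max fields dropped from rtl_chip_infos[] near the top of the file; the per-chip data is now derived from mac_version. They are consumed later in rtl_init_one():]

	if (rtl_chip_supports_csum_v2(tp))
		tp->tso_csum = rtl8169_tso_csum_v2;

	dev->max_mtu = rtl_jumbo_max(tp);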
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
struct rtl8169_private *tp;
- struct mii_if_info *mii;
struct net_device *dev;
int chipset, region, i;
- int rc;
-
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
+ int jumbo_max, rc;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -7638,19 +7266,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = cfg->has_gmii;
-
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ tp->supports_gmii = cfg->has_gmii;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7689,6 +7305,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, cfg->default_ver);
+ if (rtl_tbi_enabled(tp)) {
+ dev_err(&pdev->dev, "TBI fiber mode not supported\n");
+ return -ENODEV;
+ }
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) &&
@@ -7736,22 +7357,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->saved_wolopts = __rtl8169_get_wol(tp);
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
- }
-
mutex_init(&tp->wk.mutex);
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
@@ -7800,16 +7405,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- switch (rtl_chip_infos[chipset].txd_version) {
- case RTL_TD_0:
- tp->tso_csum = rtl8169_tso_csum_v1;
- break;
- case RTL_TD_1:
+ if (rtl_chip_supports_csum_v2(tp)) {
tp->tso_csum = rtl8169_tso_csum_v2;
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- break;
- default:
- WARN_ON_ONCE(1);
+ } else {
+ tp->tso_csum = rtl8169_tso_csum_v1;
}
dev->hw_features |= NETIF_F_RXALL;
@@ -7817,14 +7417,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 60 - hw-specific max */
dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = rtl_chip_infos[chipset].jumbo_max;
+ jumbo_max = rtl_jumbo_max(tp);
+ dev->max_mtu = jumbo_max;
tp->hw_start = cfg->hw_start;
tp->event_slow = cfg->event_slow;
tp->coalesce_info = cfg->coalesce_info;
- timer_setup(&tp->timer, rtl8169_phy_timer, 0);
-
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
@@ -7835,30 +7434,39 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = register_netdev(dev);
- if (rc < 0)
+ rc = r8169_mdio_register(tp);
+ if (rc)
return rc;
+ /* chip gets powered up in rtl_open() */
+ rtl_pll_power_down(tp);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_mdio_unregister;
+
netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
(u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
pci_irq_vector(pdev, 0));
- if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
- netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
- "tx checksumming: %s]\n",
- rtl_chip_infos[chipset].jumbo_max,
- tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko");
- }
+
+ if (jumbo_max > JUMBO_1K)
+ netif_info(tp, probe, dev,
+ "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
+ jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
+ "ok" : "ko");
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
- netif_carrier_off(dev);
-
if (pci_dev_run_wake(pdev))
pm_runtime_put_sync(&pdev->dev);
return 0;
+
+err_mdio_unregister:
+ mdiobus_unregister(tp->mii_bus);
+ return rc;
}
static struct pci_driver rtl8169_pci_driver = {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0d811c02ff34..c06f2df895c2 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1167,7 +1167,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset)
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
int i = 0;
@@ -1199,7 +1199,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
break;
}
}
@@ -1564,7 +1564,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
skb_tx_timestamp(skb);
@@ -1597,7 +1597,8 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5614fd231bbe..5573199c4536 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -439,10 +439,15 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
enum_index);
}
+static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
+{
+ return mdp->reg_offset[enum_index];
+}
+
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
int enum_index)
{
- u16 offset = mdp->reg_offset[enum_index];
+ u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
return;
@@ -452,7 +457,7 @@ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
- u16 offset = mdp->reg_offset[enum_index];
+ u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
return ~0U;
@@ -622,7 +627,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -672,7 +676,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -798,7 +801,6 @@ static struct sh_eth_cpu_data r8a77980_data = {
.hw_swap = 1,
.nbst = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -851,7 +853,6 @@ static struct sh_eth_cpu_data sh7724_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
@@ -898,7 +899,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.hw_swap = 1,
.no_ade = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.rtrate = 1,
.dual_port = 1,
};
@@ -978,7 +978,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -1467,7 +1466,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+ sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
/* all sh_eth int mask */
sh_eth_write(ndev, 0, EESIPR);
@@ -1527,9 +1526,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* mask reset */
if (mdp->cd->apr)
- sh_eth_write(ndev, APR_AP, APR);
+ sh_eth_write(ndev, 1, APR);
if (mdp->cd->mpr)
- sh_eth_write(ndev, MPR_MP, MPR);
+ sh_eth_write(ndev, 1, MPR);
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
@@ -2677,34 +2676,36 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
return 0;
}
-static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
+static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
const u8 *addr)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
u32 val;
val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
- iowrite32(val, reg);
+ iowrite32(val, mdp->tsu_addr + offset);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
val = addr[4] << 8 | addr[5];
- iowrite32(val, reg + 4);
+ iowrite32(val, mdp->tsu_addr + offset + 4);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
return 0;
}
-static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
+static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
u32 val;
- val = ioread32(reg);
+ val = ioread32(mdp->tsu_addr + offset);
addr[0] = (val >> 24) & 0xff;
addr[1] = (val >> 16) & 0xff;
addr[2] = (val >> 8) & 0xff;
addr[3] = val & 0xff;
- val = ioread32(reg + 4);
+ val = ioread32(mdp->tsu_addr + offset + 4);
addr[4] = (val >> 8) & 0xff;
addr[5] = val & 0xff;
}
@@ -2713,12 +2714,12 @@ static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
u8 c_addr[ETH_ALEN];
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
- sh_eth_tsu_read_entry(reg_offset, c_addr);
+ sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2740,7 +2741,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int ret;
u8 blank[ETH_ALEN];
@@ -2757,7 +2758,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i, ret;
if (!mdp->cd->tsu)
@@ -2831,15 +2832,15 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
u8 addr[ETH_ALEN];
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
- sh_eth_tsu_read_entry(reg_offset, addr);
+ sh_eth_tsu_read_entry(ndev, reg_offset, addr);
if (is_multicast_ether_addr(addr))
sh_eth_tsu_del_entry(ndev, addr);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 726c55a82dd7..f94be99cf400 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT {
/* APR */
enum APR_BIT {
- APR_AP = 0x00000001,
+ APR_AP = 0x0000ffff,
};
/* MPR */
enum MPR_BIT {
- MPR_MP = 0x00000001,
+ MPR_MP = 0x0000ffff,
};
/* TRSCER */
@@ -403,8 +403,7 @@ enum DESC_I_BIT {
/* RPADIR */
enum RPADIR_BIT {
- RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
- RPADIR_PADR = 0x0003f,
+ RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff,
};
/* FDR */
@@ -488,7 +487,6 @@ struct sh_eth_cpu_data {
u32 ecsipr_value;
u32 fdr_value;
u32 fcftr_value;
- u32 rpadir_value;
/* interrupt checking mask */
u32 tx_check;
@@ -560,10 +558,4 @@ struct sh_eth_private {
unsigned wol_enabled:1;
};
-static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
- int enum_index)
-{
- return mdp->tsu_addr + mdp->reg_offset[enum_index];
-}
-
#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 542b67d436df..c9aad0eda57f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -319,6 +319,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -329,6 +330,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3bac58d0f88b..c5c297e78d06 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
+
+obj-$(CONFIG_SFC_FALCON) += falcon/
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 019cef1d3cf7..3d76fd1504c2 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
return -ENOMEM;
for (i = 0; i < efx->vf_count; i++) {
- random_ether_addr(nic_data->vf[i].mac);
+ eth_random_addr(nic_data->vf[i].mac);
nic_data->vf[i].efx = NULL;
nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
@@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
- u16 old_vlan, new_vlan;
+ u16 new_vlan;
int rc = 0, rc2 = 0;
if (vf_i >= efx->vf_count)
@@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
}
/* Do the actual vlan change */
- old_vlan = vf->vlan;
vf->vlan = new_vlan;
/* Restore everything in reverse order */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ce3a177081a8..330233286e78 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx)
static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->pkts_compl = 0;
tx_queue->bytes_compl = 0;
@@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
}
}
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
return spent;
}
@@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
+ channel->rx_list = NULL;
+
return 0;
fail:
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 56049157a5af..1ccdb7a82e2a 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -963,6 +963,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
switch (info->flow_type) {
case TCP_V4_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 65568925c3ef..961b92979640 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -448,6 +448,7 @@ enum efx_sync_events_state {
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
* @sync_events_state: Current state of sync events on this channel
@@ -500,6 +501,8 @@ struct efx_channel {
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
+ struct list_head *rx_list;
+
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d2e254f2f72b..396ff01298cd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
return;
/* Pass the packet up */
- netif_receive_skb(skb);
+ if (channel->rx_list != NULL)
+ /* Add to list, will pass up later */
+ list_add_tail(&skb->list, channel->rx_list);
+ else
+ /* No list, so pass it up now */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
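Taken together, the three sfc hunks above add batched RX delivery: efx_process_channel() publishes a list head on the channel, efx_rx_deliver() queues completed skbs onto it, and the poll flushes them with a single netif_receive_skb_list() call. The shape of the pattern, as a minimal fragment:

LIST_HEAD(rx_list);                        /* on the poll's stack */

channel->rx_list = &rx_list;               /* publish before polling */
/* per packet, inside the poll: */
list_add_tail(&skb->list, channel->rx_list);
/* after the poll loop: */
netif_receive_skb_list(channel->rx_list);  /* deliver the whole batch */
channel->rx_list = NULL;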
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 949aaef390b6..15c62c160953 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int card_idx = -1;
void __iomem *ioaddr;
int chip_idx = (int) ent->driver_data;
- int irq;
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
@@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_enable_device(pdev);
if (ret)
goto out;
- irq = pdev->irq;
if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
dev_err(&pdev->dev, "no PCI region space\n");
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e080d3e7c582..7aa5ebb6766c 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -232,8 +232,7 @@
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
-#define DESC_NUM 128
-#define NAPI_BUDGET (DESC_NUM / 2)
+#define DESC_NUM 256
#define DESC_SZ sizeof(struct netsec_de)
@@ -642,8 +641,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
tmp_skb = netsec_alloc_skb(priv, &td);
- dma_rmb();
-
tail = dring->tail;
if (!tmp_skb) {
@@ -657,8 +654,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
/* move tail ahead */
dring->tail = (dring->tail + 1) % DESC_NUM;
- dring->pkt_cnt--;
-
return skb;
}
@@ -731,25 +726,24 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0, rx_num = 0;
+ int done = 0;
struct netsec_desc desc;
struct sk_buff *skb;
u16 len;
while (done < budget) {
- if (!rx_num) {
- rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
- dring->pkt_cnt += rx_num;
+ u16 idx = dring->tail;
+ struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- /* move head 'rx_num' */
- dring->head = (dring->head + rx_num) % DESC_NUM;
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ break;
- rx_num = dring->pkt_cnt;
- if (!rx_num)
- break;
- }
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the netsec_de until we have
+ * verified the descriptor has been written back
+ */
+ dma_rmb();
done++;
- rx_num--;
skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
if (unlikely(!skb) || rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
@@ -780,11 +774,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- struct net_device *ndev;
int tx, rx, done, todo;
priv = container_of(napi, struct netsec_priv, napi);
- ndev = priv->ndev;
todo = budget;
do {
@@ -1666,7 +1658,7 @@ static int netsec_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision %d.%d\n",
hw_ver >> 16, hw_ver & 0xffff);
- netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
ndev->netdev_ops = &netsec_netdev_ops;
ndev->ethtool_ops = &netsec_ethtool_ops;
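The netsec RX rework above stops trusting NETSEC_REG_NRM_RX_PKTCNT and instead polls the descriptor's OWN bit directly, with dma_rmb() ordering all further descriptor reads behind that check. The core of the loop, distilled from the hunk (names as in the driver):

/* Sketch of the ownership-based RX poll: test OWN first, then
 * fence before reading any other descriptor fields.
 */
while (done < budget) {
        struct netsec_de *de = dring->vaddr + DESC_SZ * dring->tail;

        if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
                break;          /* still owned by hardware */
        dma_rmb();              /* descriptor fields now stable */
        /* ... consume the descriptor, advance dring->tail ... */
        done++;
}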
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 68e9e2640c62..99967a80a8c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -5,7 +5,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
- stmmac_tc.o $(stmmac-y)
+ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
+ $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 78fd0f8b8e81..1854f270ad66 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -36,12 +36,14 @@
#include "mmc.h"
/* Synopsys Core versions */
-#define DWMAC_CORE_3_40 0x34
-#define DWMAC_CORE_3_50 0x35
-#define DWMAC_CORE_4_00 0x40
-#define DWMAC_CORE_4_10 0x41
-#define DWMAC_CORE_5_00 0x50
-#define DWMAC_CORE_5_10 0x51
+#define DWMAC_CORE_3_40 0x34
+#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define DWMAC_CORE_4_10 0x41
+#define DWMAC_CORE_5_00 0x50
+#define DWMAC_CORE_5_10 0x51
+#define DWXGMAC_CORE_2_10 0x21
+
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
/* These need to be power of two, and >= 4 */
@@ -398,6 +400,8 @@ struct mac_link {
u32 speed10;
u32 speed100;
u32 speed1000;
+ u32 speed2500;
+ u32 speed10000;
u32 duplex;
};
@@ -439,6 +443,7 @@ struct stmmac_rx_routing {
int dwmac100_setup(struct stmmac_priv *priv);
int dwmac1000_setup(struct stmmac_priv *priv);
int dwmac4_setup(struct stmmac_priv *priv);
+int dwxgmac2_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 3304095c934c..fad503820e04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -78,6 +78,8 @@ static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "snps,dwmac-4.00"},
{ .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
+ { .compatible = "snps,dwxgmac-2.10"},
+ { .compatible = "snps,dwxgmac"},
{ }
};
MODULE_DEVICE_TABLE(of, dwmac_generic_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index f08625a02cea..7b923362ee55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -61,6 +61,7 @@ struct rk_priv_data {
struct clk *mac_clk_tx;
struct clk *clk_mac_ref;
struct clk *clk_mac_refout;
+ struct clk *clk_mac_speed;
struct clk *aclk_mac;
struct clk *pclk_mac;
struct clk *clk_phy;
@@ -83,6 +84,64 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define PX30_GRF_GMAC_CON1 0x0904
+
+/* PX30_GRF_GMAC_CON1 */
+#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+ GRF_BIT(6))
+#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define PX30_GMAC_SPEED_100M GRF_BIT(2)
+
+static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ int ret;
+
+ if (IS_ERR(bsp_priv->clk_mac_speed)) {
+ dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_10M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ __func__, ret);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_100M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ __func__, ret);
+
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops px30_ops = {
+ .set_to_rmii = px30_set_to_rmii,
+ .set_rmii_speed = px30_set_rmii_speed,
+};
+
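The 25 MHz and 2.5 MHz rates programmed above match the conventional MII TX clock rates (four data bits per clock, so 100 Mb/s / 4 = 25 MHz and 10 Mb/s / 4 = 2.5 MHz). The mapping restated as a helper, with an illustrative name and covering only the two speeds handled above:

/* Sketch: speed (Mb/s) -> clk_mac_speed rate (Hz), as programmed
 * by px30_set_rmii_speed().
 */
static unsigned long px30_rmii_clk_rate(int speed)
{
        return speed == 10 ? 2500000 : 25000000;
}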
#define RK3128_GRF_MAC_CON0 0x0168
#define RK3128_GRF_MAC_CON1 0x016c
@@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
}
}
+ bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+ if (IS_ERR(bsp_priv->clk_mac_speed))
+ dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else {
@@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->mac_clk_tx))
clk_prepare_enable(bsp_priv->mac_clk_tx);
+ if (!IS_ERR(bsp_priv->clk_mac_speed))
+ clk_prepare_enable(bsp_priv->clk_mac_speed);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->pclk_mac);
clk_disable_unprepare(bsp_priv->mac_clk_tx);
+
+ clk_disable_unprepare(bsp_priv->clk_mac_speed);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 65bc3556bd8f..edb6053bd980 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
}
}
+static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+}
+
static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
@@ -441,6 +454,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
};
@@ -468,5 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
new file mode 100644
index 000000000000..0a80fa25afe3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXGMAC2_H__
+#define __STMMAC_DWXGMAC2_H__
+
+#include "common.h"
+
+/* Misc */
+#define XGMAC_JUMBO_LEN 16368
+
+/* MAC Registers */
+#define XGMAC_TX_CONFIG 0x00000000
+#define XGMAC_CONFIG_SS_OFF 29
+#define XGMAC_CONFIG_SS_MASK GENMASK(30, 29)
+#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500 (0x2 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_1000 (0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SARC GENMASK(22, 20)
+#define XGMAC_CONFIG_SARC_SHIFT 20
+#define XGMAC_CONFIG_JD BIT(16)
+#define XGMAC_CONFIG_TE BIT(0)
+#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD)
+#define XGMAC_RX_CONFIG 0x00000004
+#define XGMAC_CONFIG_ARPEN BIT(31)
+#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
+#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_S2KP BIT(11)
+#define XGMAC_CONFIG_IPC BIT(9)
+#define XGMAC_CONFIG_JE BIT(8)
+#define XGMAC_CONFIG_WD BIT(7)
+#define XGMAC_CONFIG_GPSLCE BIT(6)
+#define XGMAC_CONFIG_CST BIT(2)
+#define XGMAC_CONFIG_ACS BIT(1)
+#define XGMAC_CONFIG_RE BIT(0)
+#define XGMAC_CORE_INIT_RX 0
+#define XGMAC_PACKET_FILTER 0x00000008
+#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_PM BIT(4)
+#define XGMAC_FILTER_HMC BIT(2)
+#define XGMAC_FILTER_PR BIT(0)
+#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
+#define XGMAC_RXQ_CTRL0 0x000000a0
+#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
+#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL2 0x000000a8
+#define XGMAC_RXQ_CTRL3 0x000000ac
+#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
+#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_PMTIS BIT(4)
+#define XGMAC_INT_EN 0x000000b4
+#define XGMAC_TSIE BIT(12)
+#define XGMAC_LPIIE BIT(5)
+#define XGMAC_PMTIE BIT(4)
+#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
+#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
+#define XGMAC_PT GENMASK(31, 16)
+#define XGMAC_PT_SHIFT 16
+#define XGMAC_TFE BIT(1)
+#define XGMAC_RX_FLOW_CTRL 0x00000090
+#define XGMAC_RFE BIT(0)
+#define XGMAC_PMT 0x000000c0
+#define XGMAC_GLBLUCAST BIT(9)
+#define XGMAC_RWKPKTEN BIT(2)
+#define XGMAC_MGKPKTEN BIT(1)
+#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_HW_FEATURE0 0x0000011c
+#define XGMAC_HWFEAT_SAVLANINS BIT(27)
+#define XGMAC_HWFEAT_RXCOESEL BIT(16)
+#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_TSSEL BIT(12)
+#define XGMAC_HWFEAT_AVSEL BIT(11)
+#define XGMAC_HWFEAT_RAVSEL BIT(10)
+#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MGKSEL BIT(7)
+#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_GMIISEL BIT(1)
+#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
+#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
+#define XGMAC_HW_FEATURE2 0x00000124
+#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24)
+#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18)
+#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
+#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
+#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_MDIO_ADDR 0x00000200
+#define XGMAC_MDIO_DATA 0x00000204
+#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_ADDR0_HIGH 0x00000300
+#define XGMAC_AE BIT(31)
+#define XGMAC_DCS GENMASK(19, 16)
+#define XGMAC_DCS_SHIFT 16
+#define XGMAC_ADDR0_LOW 0x00000304
+#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_TIMESTAMP_STATUS 0x00000d20
+#define XGMAC_TXTSC BIT(15)
+#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
+#define XGMAC_TXTSSTSLO GENMASK(30, 0)
+#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+
+/* MTL Registers */
+#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_ETSALG GENMASK(6, 5)
+#define XGMAC_WRR (0x0 << 5)
+#define XGMAC_WFQ (0x1 << 5)
+#define XGMAC_DWRR (0x2 << 5)
+#define XGMAC_RAA BIT(2)
+#define XGMAC_MTL_INT_STATUS 0x00001020
+#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
+#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
+#define XGMAC_TQS GENMASK(25, 16)
+#define XGMAC_TQS_SHIFT 16
+#define XGMAC_TTC GENMASK(6, 4)
+#define XGMAC_TTC_SHIFT 4
+#define XGMAC_TXQEN GENMASK(3, 2)
+#define XGMAC_TXQEN_SHIFT 2
+#define XGMAC_TSF BIT(1)
+#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
+#define XGMAC_RQS GENMASK(25, 16)
+#define XGMAC_RQS_SHIFT 16
+#define XGMAC_EHFC BIT(7)
+#define XGMAC_RSF BIT(5)
+#define XGMAC_RTC GENMASK(1, 0)
+#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
+#define XGMAC_RXOIE BIT(16)
+#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
+#define XGMAC_RXOVFIS BIT(16)
+#define XGMAC_ABPSIS BIT(1)
+#define XGMAC_TXUNFIS BIT(0)
+
+/* DMA Registers */
+#define XGMAC_DMA_MODE 0x00003000
+#define XGMAC_SWR BIT(0)
+#define XGMAC_DMA_SYSBUS_MODE 0x00003004
+#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
+#define XGMAC_WR_OSR_LMT_SHIFT 24
+#define XGMAC_RD_OSR_LMT GENMASK(21, 16)
+#define XGMAC_RD_OSR_LMT_SHIFT 16
+#define XGMAC_EN_LPI BIT(15)
+#define XGMAC_LPI_XIT_PKT BIT(14)
+#define XGMAC_AAL BIT(12)
+#define XGMAC_BLEN GENMASK(7, 1)
+#define XGMAC_BLEN256 BIT(7)
+#define XGMAC_BLEN128 BIT(6)
+#define XGMAC_BLEN64 BIT(5)
+#define XGMAC_BLEN32 BIT(4)
+#define XGMAC_BLEN16 BIT(3)
+#define XGMAC_BLEN8 BIT(2)
+#define XGMAC_BLEN4 BIT(1)
+#define XGMAC_UNDEF BIT(0)
+#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_PBLx8 BIT(16)
+#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_TxPBL GENMASK(21, 16)
+#define XGMAC_TxPBL_SHIFT 16
+#define XGMAC_TSE BIT(12)
+#define XGMAC_OSP BIT(4)
+#define XGMAC_TXST BIT(0)
+#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
+#define XGMAC_RxPBL GENMASK(21, 16)
+#define XGMAC_RxPBL_SHIFT 16
+#define XGMAC_RXST BIT(0)
+#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x)))
+#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x)))
+#define XGMAC_NIE BIT(15)
+#define XGMAC_AIE BIT(14)
+#define XGMAC_RBUE BIT(7)
+#define XGMAC_RIE BIT(6)
+#define XGMAC_TIE BIT(0)
+#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
+ XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
+#define XGMAC_RWT GENMASK(7, 0)
+#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
+#define XGMAC_NIS BIT(15)
+#define XGMAC_AIS BIT(14)
+#define XGMAC_FBE BIT(12)
+#define XGMAC_RBU BIT(7)
+#define XGMAC_RI BIT(6)
+#define XGMAC_TPS BIT(1)
+#define XGMAC_TI BIT(0)
+
+/* Descriptors */
+#define XGMAC_TDES2_IOC BIT(31)
+#define XGMAC_TDES2_TTSE BIT(30)
+#define XGMAC_TDES2_B2L GENMASK(29, 16)
+#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_B1L GENMASK(13, 0)
+#define XGMAC_TDES3_OWN BIT(31)
+#define XGMAC_TDES3_CTXT BIT(30)
+#define XGMAC_TDES3_FD BIT(29)
+#define XGMAC_TDES3_LD BIT(28)
+#define XGMAC_TDES3_CPC GENMASK(27, 26)
+#define XGMAC_TDES3_CPC_SHIFT 26
+#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_THL GENMASK(22, 19)
+#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_CIC GENMASK(17, 16)
+#define XGMAC_TDES3_CIC_SHIFT 16
+#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES3_OWN BIT(31)
+#define XGMAC_RDES3_CTXT BIT(30)
+#define XGMAC_RDES3_IOC BIT(30)
+#define XGMAC_RDES3_LD BIT(28)
+#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_ES BIT(15)
+#define XGMAC_RDES3_PL GENMASK(13, 0)
+#define XGMAC_RDES3_TSD BIT(6)
+#define XGMAC_RDES3_TSA BIT(4)
+
+#endif /* __STMMAC_DWXGMAC2_H__ */
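Most per-queue controls in this header pack one small field per queue into a shared register (XGMAC_PSRQ, XGMAC_QxMDMACH, XGMAC_RXQEN). A minimal sketch of the read-modify-write idiom these macros support, mirroring dwxgmac2_rx_queue_prio() in the core file added below (the helper name is illustrative):

/* Sketch: update one queue's 8-bit priority field without
 * disturbing the other queues sharing the register.
 */
static void xgmac_set_rxq_prio(void __iomem *ioaddr, u32 prio, u32 queue)
{
        u32 reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
        u32 value = readl(ioaddr + reg);

        value &= ~XGMAC_PSRQ(queue);    /* clear this queue's byte */
        value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
        writel(value, ioaddr + reg);
}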
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
new file mode 100644
index 000000000000..d182f82f7b58
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static void dwxgmac2_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int mtu = dev->mtu;
+ u32 tx, rx;
+
+ tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ tx |= XGMAC_CORE_INIT_TX;
+ rx |= XGMAC_CORE_INIT_RX;
+
+ if (mtu >= 9000) {
+ rx |= XGMAC_CONFIG_GPSLCE;
+ rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT;
+ rx |= XGMAC_CONFIG_WD;
+ } else if (mtu > 2000) {
+ rx |= XGMAC_CONFIG_JE;
+ } else if (mtu > 1500) {
+ rx |= XGMAC_CONFIG_S2KP;
+ }
+
+ if (hw->ps) {
+ tx |= XGMAC_CONFIG_TE;
+ tx &= ~hw->link.speed_mask;
+
+ switch (hw->ps) {
+ case SPEED_10000:
+ tx |= hw->link.speed10000;
+ break;
+ case SPEED_2500:
+ tx |= hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ default:
+ tx |= hw->link.speed1000;
+ break;
+ }
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
+
+static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable) {
+ tx |= XGMAC_CONFIG_TE;
+ rx |= XGMAC_CONFIG_RE;
+ } else {
+ tx &= ~XGMAC_CONFIG_TE;
+ rx &= ~XGMAC_CONFIG_RE;
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ if (hw->rx_csum)
+ value |= XGMAC_CONFIG_IPC;
+ else
+ value &= ~XGMAC_CONFIG_IPC;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
+}
+
+static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XGMAC_RXQ_CTRL0);
+}
+
+static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSRQ(queue);
+ value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_RAA;
+
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= XGMAC_RAA;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_ETSALG;
+
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= XGMAC_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= XGMAC_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= XGMAC_DWRR;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
+ u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_QxMDMACH(queue);
+ value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 stat, en;
+
+ en = readl(ioaddr + XGMAC_INT_EN);
+ stat = readl(ioaddr + XGMAC_INT_STATUS);
+
+ stat &= en;
+
+ if (stat & XGMAC_PMTIS) {
+ x->irq_receive_pmt_irq_n++;
+ readl(ioaddr + XGMAC_PMT);
+ }
+
+ return 0;
+}
+
+static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int ret = 0;
+ u32 status;
+
+ status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
+ if (status & BIT(chan)) {
+ u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+
+ if (chan_status & XGMAC_RXOVFIS)
+ ret |= CORE_IRQ_MTL_RX_OVERFLOW;
+
+ writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+ }
+
+ return ret;
+}
+
+static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 i;
+
+ if (fc & FLOW_RX)
+ writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
+ if (fc & FLOW_TX) {
+ for (i = 0; i < tx_cnt; i++) {
+ u32 value = XGMAC_TFE;
+
+ if (duplex)
+ value |= pause_time << XGMAC_PT_SHIFT;
+
+ writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
+ }
+ }
+}
+
+static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 val = 0x0;
+
+ if (mode & WAKE_MAGIC)
+ val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
+ if (mode & WAKE_UCAST)
+ val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
+ if (val) {
+ u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
+ cfg |= XGMAC_CONFIG_RE;
+ writel(cfg, ioaddr + XGMAC_RX_CONFIG);
+ }
+
+ writel(val, ioaddr + XGMAC_PMT);
+}
+
+static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (addr[5] << 8) | addr[4];
+ writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH);
+
+ value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(value, ioaddr + XGMAC_ADDR0_LOW);
+}
+
+static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH);
+ lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static void dwxgmac2_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 value = XGMAC_FILTER_RA;
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= XGMAC_FILTER_PR;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ value |= XGMAC_FILTER_PM;
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0));
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1));
+ }
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+}
+
+const struct stmmac_ops dwxgmac210_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = NULL,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = NULL,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = NULL,
+ .dump_regs = NULL,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = NULL,
+ .reset_eee_mode = NULL,
+ .set_eee_timer = NULL,
+ .set_eee_pls = NULL,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+};
+
+int dwxgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXGMAC2\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = XGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XGMAC_CONFIG_SS_2500;
+ mac->link.speed10000 = XGMAC_CONFIG_SS_10000;
+ mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
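dwxgmac2_set_umac_addr() and dwxgmac2_get_umac_addr() above split the six address octets across the ADDR0 high/low registers, with addr[0] landing in the low byte of XGMAC_ADDR0_LOW. The packing in isolation (illustrative helper name, no MMIO):

/* Sketch: the ADDR0 byte layout used by the two helpers above;
 * unpacking simply reverses the shifts.
 */
static void xgmac_pack_addr0(const unsigned char *addr, u32 *hi, u32 *lo)
{
        *lo = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0];
        *hi = (addr[5] << 8 | addr[4]) | XGMAC_AE;  /* AE marks it valid */
}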
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
new file mode 100644
index 000000000000..1d858fdec997
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+ int ret = tx_done;
+
+ if (unlikely(tdes3 & XGMAC_TDES3_OWN))
+ return tx_dma_own;
+ if (likely(!(tdes3 & XGMAC_TDES3_LD)))
+ return tx_not_ls;
+
+ return ret;
+}
+
+static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & XGMAC_RDES3_OWN))
+ return dma_own;
+ if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return discard_frame;
+ if (unlikely(rdes3 & XGMAC_RDES3_ES))
+ ret = discard_frame;
+
+ return ret;
+}
+
+static int dwxgmac2_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L);
+}
+
+static int dwxgmac2_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0;
+}
+
+static void dwxgmac2_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN);
+}
+
+static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+}
+
+static int dwxgmac2_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
+}
+
+static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
+}
+
+static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE);
+}
+
+static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return 0; /* Not supported */
+}
+
+static void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns = 0;
+
+ ns += le32_to_cpu(p->des1) * 1000000000ULL;
+ ns += le32_to_cpu(p->des0);
+
+ *ts = ns;
+}
+
+static int dwxgmac2_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ bool desc_valid, ts_valid;
+
+ desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
+ ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
+
+ if (likely(desc_valid && ts_valid))
+ return 0;
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = -EBUSY;
+
+ if (likely(rdes3 & XGMAC_RDES3_CDA))
+ ret = dwxgmac2_rx_check_timestamp(next_desc);
+
+ return ret;
+}
+
+static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ dwxgmac2_set_rx_owner(p, disable_rx_ic);
+}
+
+static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
+
+ tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ if (is_fs)
+ tdes3 |= XGMAC_TDES3_FD;
+ else
+ tdes3 &= ~XGMAC_TDES3_FD;
+
+ if (csum_flag)
+ tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
+ else
+ tdes3 &= ~XGMAC_TDES3_CIC;
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up first, to
+ * avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ if (len1)
+ p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
+ if (len2)
+ p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
+ XGMAC_TDES2_B2L);
+ if (is_fs) {
+ tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
+ tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
+ XGMAC_TDES3_THL;
+ tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
+ } else {
+ tdes3 &= ~XGMAC_TDES3_FD;
+ }
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up first, to
+ * avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC);
+}
+
+static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = cpu_to_le32(mss);
+ p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
+}
+
+static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des0);
+}
+
+static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(addr);
+ p->des1 = 0;
+}
+
+static void dwxgmac2_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+const struct stmmac_desc_ops dwxgmac210_desc_ops = {
+ .tx_status = dwxgmac2_get_tx_status,
+ .rx_status = dwxgmac2_get_rx_status,
+ .get_tx_len = dwxgmac2_get_tx_len,
+ .get_tx_owner = dwxgmac2_get_tx_owner,
+ .set_tx_owner = dwxgmac2_set_tx_owner,
+ .set_rx_owner = dwxgmac2_set_rx_owner,
+ .get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_frame_len = dwxgmac2_get_rx_frame_len,
+ .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
+ .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status,
+ .get_timestamp = dwxgmac2_get_timestamp,
+ .set_tx_ic = dwxgmac2_set_tx_ic,
+ .prepare_tx_desc = dwxgmac2_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwxgmac2_prepare_tso_tx_desc,
+ .release_tx_desc = dwxgmac2_release_tx_desc,
+ .init_rx_desc = dwxgmac2_init_rx_desc,
+ .init_tx_desc = dwxgmac2_init_tx_desc,
+ .set_mss = dwxgmac2_set_mss,
+ .get_addr = dwxgmac2_get_addr,
+ .set_addr = dwxgmac2_set_addr,
+ .clear = dwxgmac2_clear,
+};
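The descriptor ops above all follow the same ownership protocol: software fills every descriptor field, issues dma_wmb(), and only then sets XGMAC_TDES3_OWN; the completion path checks OWN before reading anything else. A condensed fragment of that ordering (variable names as in the file above; the real prepare_tx_desc() folds OWN into the final des3 write and needs the barrier only on the first descriptor of a frame):

p->des2 = cpu_to_le32(len & XGMAC_TDES2_B1L);   /* fill payload fields */
dma_wmb();                                      /* make them visible first */
p->des3 = cpu_to_le32(tdes3 | XGMAC_TDES3_OWN); /* hand over to DMA last */

/* consumer side, as in dwxgmac2_get_tx_status(): */
if (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN)
        return tx_dma_own;      /* hardware still owns the descriptor */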
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
new file mode 100644
index 000000000000..20909036e002
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/iopoll.h>
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_MODE);
+
+ /* DMA SW reset */
+ writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
+
+ return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
+ !(value & XGMAC_SWR), 0, 100000);
+}
+
+static void dwxgmac2_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+
+ if (dma_cfg->aal)
+ value |= XGMAC_AAL;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+
+ if (dma_cfg->pblx8)
+ value |= XGMAC_PBLx8;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RxPBL;
+ value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TxPBL;
+ value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
+ value |= XGMAC_OSP;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ int i;
+
+ if (axi->axi_lpi_en)
+ value |= XGMAC_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= XGMAC_LPI_XIT_PKT;
+
+ value &= ~XGMAC_WR_OSR_LMT;
+ value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
+ XGMAC_WR_OSR_LMT;
+
+ value &= ~XGMAC_RD_OSR_LMT;
+ value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
+ XGMAC_RD_OSR_LMT;
+
+ value &= ~XGMAC_BLEN;
+ for (i = 0; i < AXI_BLEN; i++) {
+ if (axi->axi_blen[i])
+ value &= ~XGMAC_UNDEF;
+
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= XGMAC_BLEN256;
+ break;
+ case 128:
+ value |= XGMAC_BLEN128;
+ break;
+ case 64:
+ value |= XGMAC_BLEN64;
+ break;
+ case 32:
+ value |= XGMAC_BLEN32;
+ break;
+ case 16:
+ value |= XGMAC_BLEN16;
+ break;
+ case 8:
+ value |= XGMAC_BLEN8;
+ break;
+ case 4:
+ value |= XGMAC_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+ unsigned int rqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_RSF;
+ } else {
+ value &= ~XGMAC_RSF;
+ value &= ~XGMAC_RTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_RTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_RTC_SHIFT;
+ else
+ value |= 0x3 << XGMAC_RTC_SHIFT;
+ }
+
+ value &= ~XGMAC_RQS;
+ value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+
+ writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+
+ /* Enable the MTL RX overflow interrupt */
+ value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
+ writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
+}
+
+static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_TSF;
+ } else {
+ value &= ~XGMAC_TSF;
+ value &= ~XGMAC_TTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_TTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_TTC_SHIFT;
+ else if (mode <= 128)
+ value |= 0x3 << XGMAC_TTC_SHIFT;
+ else if (mode <= 192)
+ value |= 0x4 << XGMAC_TTC_SHIFT;
+ else if (mode <= 256)
+ value |= 0x5 << XGMAC_TTC_SHIFT;
+ else if (mode <= 384)
+ value |= 0x6 << XGMAC_TTC_SHIFT;
+ else
+ value |= 0x7 << XGMAC_TTC_SHIFT;
+ }
+
+ value &= ~XGMAC_TXQEN;
+ if (qmode != MTL_QUEUE_AVB)
+ value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ else
+ value |= 0x1 << XGMAC_TXQEN_SHIFT;
+
+ value &= ~XGMAC_TQS;
+ value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
+
+ writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+}
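
Both queue-mode helpers above size the MTL queue as fifosz / 256 - 1 before masking the result into the RQS/TQS field. A standalone sketch of that encoding, not part of the patch; SKETCH_QS_SHIFT and the field width are illustrative assumptions, the authoritative XGMAC_RQS/XGMAC_TQS layout lives in dwxgmac2.h:

#include <stdio.h>

#define SKETCH_QS_SHIFT	16
#define SKETCH_QS_MASK	(0x3ffU << SKETCH_QS_SHIFT)

static unsigned int encode_queue_size(unsigned int reg, int fifosz)
{
	unsigned int qs = fifosz / 256 - 1;	/* 256-byte units, minus one */

	reg &= ~SKETCH_QS_MASK;
	reg |= (qs << SKETCH_QS_SHIFT) & SKETCH_QS_MASK;
	return reg;
}

int main(void)
{
	/* a 16 KB FIFO encodes as 16384 / 256 - 1 = 63 */
	printf("opmode = 0x%08x\n", encode_queue_size(0, 16384));
	return 0;
}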
+
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value |= XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value |= XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value &= ~XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value |= XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value |= XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value &= ~XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan)
+{
+ u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ int ret = 0;
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & XGMAC_AIS)) {
+ if (unlikely(intr_status & XGMAC_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret |= tx_hard_error;
+ }
+ if (unlikely(intr_status & XGMAC_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret |= tx_hard_error;
+ }
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & XGMAC_NIS)) {
+ x->normal_irq_n++;
+
+ if (likely(intr_status & XGMAC_RI)) {
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ if (likely(value & XGMAC_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & XGMAC_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ }
+
+ /* Clear interrupts */
+ writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+
+ return ret;
+}
+
+static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap;
+
+ /* MAC HW feature 0 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
+ dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
+ dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
+ dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
+ dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+
+ /* MAC HW feature 1 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->tx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);
+
+ /* MAC HW feature 2 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
+ dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
+ dma_cap->number_tx_channel =
+ ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
+ dma_cap->number_rx_channel =
+ ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_queues =
+ ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
+ dma_cap->number_rx_queues =
+ ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+}
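
The FIFO sizes in HW feature 1 are reported as log2(bytes / 128), hence the 128 << field decode above. A tiny standalone table of that mapping, not part of the patch (field isolation is omitted; the real masks are in dwxgmac2.h):

#include <stdio.h>

int main(void)
{
	unsigned int field;

	/* e.g. field 7 decodes to a 16 KB FIFO */
	for (field = 0; field <= 7; field++)
		printf("field %u -> %u bytes\n", field, 128U << field);
	return 0;
}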
+
+static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
+{
+ u32 i;
+
+ for (i = 0; i < nchan; i++)
+ writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
+}
+
+static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_TSE;
+ else
+ value &= ~XGMAC_TSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+}
+
+static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value |= bfsize << 1;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+}
+
+const struct stmmac_dma_ops dwxgmac210_dma_ops = {
+ .reset = dwxgmac2_dma_reset,
+ .init = dwxgmac2_dma_init,
+ .init_chan = dwxgmac2_dma_init_chan,
+ .init_rx_chan = dwxgmac2_dma_init_rx_chan,
+ .init_tx_chan = dwxgmac2_dma_init_tx_chan,
+ .axi = dwxgmac2_dma_axi,
+ .dump_regs = NULL,
+ .dma_rx_mode = dwxgmac2_dma_rx_mode,
+ .dma_tx_mode = dwxgmac2_dma_tx_mode,
+ .enable_dma_irq = dwxgmac2_enable_dma_irq,
+ .disable_dma_irq = dwxgmac2_disable_dma_irq,
+ .start_tx = dwxgmac2_dma_start_tx,
+ .stop_tx = dwxgmac2_dma_stop_tx,
+ .start_rx = dwxgmac2_dma_start_rx,
+ .stop_rx = dwxgmac2_dma_stop_rx,
+ .dma_interrupt = dwxgmac2_dma_interrupt,
+ .get_hw_feature = dwxgmac2_get_hw_feature,
+ .rx_watchdog = dwxgmac2_rx_watchdog,
+ .set_rx_ring_len = dwxgmac2_set_rx_ring_len,
+ .set_tx_ring_len = dwxgmac2_set_tx_ring_len,
+ .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
+ .enable_tso = dwxgmac2_enable_tso,
+ .set_bfsize = dwxgmac2_set_bfsize,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1f50e83cafb2..357309a6d6a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -72,6 +72,7 @@ static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
static const struct stmmac_hwif_entry {
bool gmac;
bool gmac4;
+ bool xgmac;
u32 min_id;
const struct stmmac_regs_off regs;
const void *desc;
@@ -87,6 +88,7 @@ static const struct stmmac_hwif_entry {
{
.gmac = false,
.gmac4 = false,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -103,6 +105,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = true,
.gmac4 = false,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -119,6 +122,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -135,6 +139,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_4_00,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -151,6 +156,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_4_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -167,6 +173,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_5_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -180,11 +187,29 @@ static const struct stmmac_hwif_entry {
.tc = &dwmac510_tc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
- }
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXGMAC_CORE_2_10,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = 0,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxgmac210_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwxgmac2_setup,
+ .quirks = NULL,
+ },
};
int stmmac_hwif_init(struct stmmac_priv *priv)
{
+ bool needs_xgmac = priv->plat->has_xgmac;
bool needs_gmac4 = priv->plat->has_gmac4;
bool needs_gmac = priv->plat->has_gmac;
const struct stmmac_hwif_entry *entry;
@@ -195,7 +220,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
if (needs_gmac) {
id = stmmac_get_id(priv, GMAC_VERSION);
- } else if (needs_gmac4) {
+ } else if (needs_gmac4 || needs_xgmac) {
id = stmmac_get_id(priv, GMAC4_VERSION);
} else {
id = 0;
@@ -229,6 +254,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
continue;
if (needs_gmac4 ^ entry->gmac4)
continue;
+ if (needs_xgmac ^ entry->xgmac)
+ continue;
/* Use synopsys_id var because some setups can override this */
if (priv->synopsys_id < entry->min_id)
continue;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index fe8b536b13f8..92b8944f26e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
};
@@ -236,6 +237,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
#define stmmac_enable_tso(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_dma_qmode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
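
These wrappers follow the dispatch pattern used throughout hwif.h: call through the per-core ops table only when the hook is implemented (the real stmmac_do_void_callback also reports an error when it is not). A minimal standalone model of that shape, not part of the patch, with the demo_* names invented for illustration:

#include <stdio.h>

struct demo_dma_ops {
	void (*enable_tso)(void *ioaddr, int en, unsigned int chan);
};

struct demo_priv {
	const struct demo_dma_ops *dma;
	void *ioaddr;
};

#define demo_enable_tso(priv, ...) \
	do { \
		if ((priv)->dma && (priv)->dma->enable_tso) \
			(priv)->dma->enable_tso(__VA_ARGS__); \
	} while (0)

static void fake_enable_tso(void *ioaddr, int en, unsigned int chan)
{
	printf("TSO %s on chan %u\n", en ? "on" : "off", chan);
}

int main(void)
{
	static const struct demo_dma_ops ops = { .enable_tso = fake_enable_tso };
	struct demo_priv priv = { .dma = &ops, .ioaddr = NULL };

	demo_enable_tso(&priv, priv.ioaddr, 1, 0);
	return 0;
}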
@@ -444,17 +447,22 @@ struct stmmac_mode_ops {
struct stmmac_priv;
struct tc_cls_u32_offload;
+struct tc_cbs_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
int (*setup_cls_u32)(struct stmmac_priv *priv,
struct tc_cls_u32_offload *cls);
+ int (*setup_cbs)(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
stmmac_do_callback(__priv, tc, init, __args)
#define stmmac_tc_setup_cls_u32(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+#define stmmac_tc_setup_cbs(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cbs, __args)
struct stmmac_regs_off {
u32 ptp_off;
@@ -471,6 +479,9 @@ extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ef6a8d39db2f..ff1ffb46198a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
+#include "dwxgmac2.h"
#include "hwif.h"
#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
@@ -262,6 +263,21 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
else
priv->clk_csr = 0;
}
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ priv->clk_csr = 0x5;
+ else if (clk_rate > 350000000)
+ priv->clk_csr = 0x4;
+ else if (clk_rate > 300000000)
+ priv->clk_csr = 0x3;
+ else if (clk_rate > 250000000)
+ priv->clk_csr = 0x2;
+ else if (clk_rate > 150000000)
+ priv->clk_csr = 0x1;
+ else
+ priv->clk_csr = 0x0;
+ }
}
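
For reference, the new XGMAC branch above can be restated as a pure function, which makes the range boundaries easy to spot-check; the thresholds are copied verbatim from the hunk:

#include <stdio.h>

static unsigned int xgmac_clk_csr(unsigned long clk_rate)
{
	if (clk_rate > 400000000)
		return 0x5;
	if (clk_rate > 350000000)
		return 0x4;
	if (clk_rate > 300000000)
		return 0x3;
	if (clk_rate > 250000000)
		return 0x2;
	if (clk_rate > 150000000)
		return 0x1;
	return 0x0;
}

int main(void)
{
	/* 300 MHz is not > 300000000, so it falls into the 0x2 bucket */
	printf("300 MHz -> 0x%x\n", xgmac_clk_csr(300000000));
	printf("500 MHz -> 0x%x\n", xgmac_clk_csr(500000000));
	return 0;
}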
static void print_pkt(unsigned char *buf, int len)
@@ -498,7 +514,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4)
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
desc = np;
/* Check if timestamp is available */
@@ -540,6 +556,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
u32 value = 0;
u32 sec_inc;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -575,7 +594,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -610,7 +629,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -647,7 +666,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -718,7 +737,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* program Sub Second Increment reg */
stmmac_config_sub_second_increment(priv,
priv->ptpaddr, priv->plat->clk_ptp_rate,
- priv->plat->has_gmac4, &sec_inc);
+ xmac, &sec_inc);
temp = div_u64(1000000000ULL, sec_inc);
/* Store sub second increment and flags for later use */
@@ -755,12 +774,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
priv->adv_ts = 0;
- /* Check if adv_ts can be enabled for dwmac 4.x core */
- if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+ /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
+ if (xmac && priv->dma_cap.atime_stamp)
priv->adv_ts = 1;
/* Dwmac 3.x core with extend_desc can support adv_ts */
else if (priv->extend_desc && priv->dma_cap.atime_stamp)
@@ -2173,6 +2194,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
rx_q = &priv->rx_queue[chan];
@@ -2203,12 +2230,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
for (chan = 0; chan < dma_csr_ch; chan++)
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
- /* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
-
- if (priv->plat->axi)
- stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
-
return ret;
}
@@ -2526,9 +2547,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
netdev_warn(priv->dev, "%s: failed debugFS registration\n",
__func__);
#endif
- /* Start the ball rolling... */
- stmmac_start_all_dma(priv);
-
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
@@ -2549,6 +2567,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
return 0;
}
@@ -3305,6 +3326,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int coe = priv->hw->rx_csum;
unsigned int next_entry;
unsigned int count = 0;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3406,7 +3430,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* in case of GMAC4 because it needs
* to refill the used descriptors, always.
*/
- if (unlikely(!priv->plat->has_gmac4 &&
+ if (unlikely(!xmac &&
((frame_len < priv->rx_copybreak) ||
stmmac_rx_threshold_count(rx_q)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
@@ -3642,7 +3666,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queues_count;
u32 queue;
+ bool xmac;
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
@@ -3661,7 +3687,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
+ if ((priv->plat->has_gmac) || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
int mtl_status;
@@ -3778,7 +3804,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
return 0;
@@ -3795,6 +3821,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
return stmmac_setup_tc_block(priv, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4267,6 +4295,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
+ else if (priv->plat->has_xgmac)
+ ndev->max_mtu = XGMAC_JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
@@ -4288,7 +4318,8 @@ int stmmac_dvr_probe(struct device *device,
* has to be disable and this can be done by passing the
* riwt_off field from the platform.
*/
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 5df1a608e566..b72ef171477e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -29,6 +29,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
+#include "dwxgmac2.h"
#include "stmmac.h"
#define MII_BUSY 0x00000001
@@ -39,6 +40,115 @@
#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+/* XGMAC defines */
+#define MII_XGMAC_SADDR BIT(18)
+#define MII_XGMAC_CMD_SHIFT 16
+#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_BUSY BIT(22)
+#define MII_XGMAC_MAX_C22ADDR 3
+#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+
+static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp;
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set port as Clause 22 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~MII_XGMAC_C22P_MASK;
+ tmp |= BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+ *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+ return 0;
+}
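
The address word built above packs the PHY address at bit 16 and the Clause 22 register number into the low five bits. A standalone sketch of just that packing, not part of the patch:

#include <stdio.h>

static unsigned int xgmac_c22_addr(int phyaddr, int phyreg)
{
	return ((unsigned int)phyaddr << 16) | ((unsigned int)phyreg & 0x1f);
}

int main(void)
{
	/* PHY 2, register 1 (BMSR) -> 0x00020001 */
	printf("addr = 0x%08x\n", xgmac_c22_addr(2, 1));
	return 0;
}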
+
+static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp, addr, value = MII_XGMAC_BUSY;
+ int ret;
+
+ if (phyreg & MII_ADDR_C45) {
+ return -EOPNOTSUPP;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to read */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ return readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
+}
+
+static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 addr, tmp, value = MII_XGMAC_BUSY;
+ int ret;
+
+ if (phyreg & MII_ADDR_C45) {
+ return -EOPNOTSUPP;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= phydata | MII_XGMAC_SADDR;
+ value |= MII_XGMAC_WRITE;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ return readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000);
+}
+
/**
* stmmac_mdio_read
* @bus: points to the mii_bus structure
@@ -205,7 +315,7 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
- int addr, found;
+ int addr, found, max_addr;
if (!mdio_bus_data)
return 0;
@@ -223,8 +333,23 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+
+ if (priv->plat->has_xgmac) {
+ new_bus->read = &stmmac_xgmac2_mdio_read;
+ new_bus->write = &stmmac_xgmac2_mdio_write;
+
+ /* Right now only C22 phys are supported */
+ max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+
+ /* Check if DT specified an unsupported phy addr */
+ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
+ dev_err(dev, "Unsupported phy_addr (max=%d)\n",
+ MII_XGMAC_MAX_C22ADDR);
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ max_addr = PHY_MAX_ADDR;
+ }
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -243,7 +368,7 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_done;
found = 0;
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ for (addr = 0; addr < max_addr; addr++) {
struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
if (!phydev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 6a393b16a1fc..c54a50dbd5ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -303,7 +303,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int stmmac_pci_suspend(struct device *dev)
+static int __maybe_unused stmmac_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
@@ -321,7 +321,7 @@ static int stmmac_pci_suspend(struct device *dev)
return 0;
}
-static int stmmac_pci_resume(struct device *dev)
+static int __maybe_unused stmmac_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 72da77b94ecd..3609c7b696c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -486,6 +486,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->force_sf_dma_mode = 1;
}
+ if (of_device_is_compatible(np, "snps,dwxgmac")) {
+ plat->has_xgmac = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0cb0e39a2be9..2293e21f789f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -71,6 +71,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (delta < 0) {
neg_adj = 1;
@@ -82,8 +85,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
spin_lock_irqsave(&priv->ptp_lock, flags);
- stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj,
- priv->plat->has_gmac4);
+ stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index f4b31d69f60e..ecccf895fd7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -21,6 +21,7 @@
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__
+#define PTP_XGMAC_OFFSET 0xd00
#define PTP_GMAC4_OFFSET 0xb00
#define PTP_GMAC3_X_OFFSET 0x700
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2258cd8cc844..1a96dd9c1091 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv)
return 0;
}
+static int tc_setup_cbs(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue = qopt->queue;
+ u32 ptr, speed_div;
+ u32 mode_to_use;
+ u64 value;
+ int ret;
+
+ /* Queue 0 is not AVB capable */
+ if (queue <= 0 || queue >= tx_queues_count)
+ return -EINVAL;
+ if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
+ return -EOPNOTSUPP;
+
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ } else if (!qopt->enable) {
+ return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ }
+
+ /* Port Transmit Rate and Speed Divider */
+ ptr = (priv->speed == SPEED_100) ? 4 : 8;
+ speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+
+ /* Final adjustments for HW */
+ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+ value = qopt->hicredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
+
+ value = qopt->locredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
+
+ ret = stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ if (ret)
+ return ret;
+
+ dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
+ queue, qopt->sendslope, qopt->idleslope,
+ qopt->hicredit, qopt->locredit);
+ return 0;
+}
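
The slope scaling above converts the kbit/s values handed in by the CBS qdisc into the hardware's credit units: value = slope * 1024 * ptr / speed_div, with ptr = 8 and speed_div = 1000000 at 1 Gbit/s. A standalone worked example of that arithmetic, not part of the patch:

#include <stdio.h>

int main(void)
{
	long long idleslope = 20000;	/* 20 Mbit/s, as passed by tc */
	long long ptr = 8, speed_div = 1000000;	/* SPEED_1000 branch */
	long long hw_idle = idleslope * 1024 * ptr / speed_div;

	printf("idle_slope written to HW: %lld\n", hw_idle);	/* 163 */
	return 0;
}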
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
};
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a5dd627fe2f9..d42f47f6c632 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 88c12474a0c3..9319d84bf49f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
bmsr = err;
if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa;
-
- err = mii_read(np, np->phy_addr, MII_ADVERTISE);
- if (err < 0)
- goto out;
- adv = err;
-
- err = mii_read(np, np->phy_addr, MII_LPA);
- if (err < 0)
- goto out;
- lpa = err;
-
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- goto out;
link_up = 1;
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
-
}
lp->active_speed = current_speed;
lp->active_duplex = current_duplex;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a94f50442613..12539b357a78 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 163d8d16bc24..dc966ddb6d81 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
struct rx_map *dm;
struct rxf_fifo *f;
struct rxdb *db;
- struct sk_buff *skb;
int delta;
ENTER;
@@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
DBG("db=%p f=%p\n", db, f);
dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
DBG("dm=%p\n", dm);
- skb = dm->skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
rxfd->va_lo = rxdd->va_lo;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3e34cb8ac1d3..832bce07c385 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -39,12 +39,15 @@
#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
+#include <net/pkt_cls.h>
#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
+#include <net/pkt_sched.h>
+
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
NETIF_MSG_DRV | NETIF_MSG_LINK | \
NETIF_MSG_IFUP | NETIF_MSG_INTR | \
@@ -153,6 +156,12 @@ do { \
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
+#define CPSW_FIFO_SHAPE_EN_SHIFT 16
+#define CPSW_FIFO_RATE_EN_SHIFT 20
+#define CPSW_TC_NUM 4
+#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK 0x7f
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
@@ -253,23 +262,24 @@ struct cpsw_ss_regs {
#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
/* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
-#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
-#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
-#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
-#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
@@ -281,7 +291,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
TS_LTYPE1_EN)
@@ -453,6 +463,9 @@ struct cpsw_priv {
u8 mac_addr[ETH_ALEN];
bool rx_pause;
bool tx_pause;
+ bool mqprio_hw;
+ int fifo_bw[CPSW_TC_NUM];
+ int shp_cfg_speed;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -552,40 +565,28 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
-#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \
- do { \
- if (!cpsw->data.dual_emac) \
- break; \
- if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
- ndev = cpsw->slaves[0].ndev; \
- skb->dev = ndev; \
- } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
- ndev = cpsw->slaves[1].ndev; \
- skb->dev = ndev; \
- } \
- } while (0)
-#define cpsw_add_mcast(cpsw, priv, addr) \
- do { \
- if (cpsw->data.dual_emac) { \
- struct cpsw_slave *slave = cpsw->slaves + \
- priv->emac_port; \
- int slave_port = cpsw_get_slave_port( \
- slave->slave_num); \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- 1 << slave_port | ALE_PORT_HOST, \
- ALE_VLAN, slave->port_vlan, 0); \
- } else { \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- ALE_ALL_PORTS, \
- 0, 0, 0); \
- } \
- } while (0)
-
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
+static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (cpsw->data.dual_emac) {
+ struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
+ int slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ cpsw_ale_add_mcast(cpsw->ale, addr,
+ 1 << slave_port | ALE_PORT_HOST,
+ ALE_VLAN, slave->port_vlan, 0);
+ return;
+ }
+
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
+}
+
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -693,7 +694,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
+ cpsw_add_mcast(priv, ha->addr);
}
}
}
@@ -785,10 +786,16 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct sk_buff *skb = token;
struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
- int ret = 0;
+ int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
+ if (cpsw->data.dual_emac) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port) {
+ ndev = cpsw->slaves[--port].ndev;
+ skb->dev = ndev;
+ }
+ }
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* In dual emac mode check for all interfaces */
@@ -967,8 +974,8 @@ static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
/* process every unprocessed channel */
ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) {
- if (!(ch_map & 0x01))
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
continue;
txv = &cpsw->txv[ch];
@@ -1077,6 +1084,38 @@ static void cpsw_set_slave_mac(struct cpsw_slave *slave,
slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
+static bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
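
cpsw_shp_is_off() and cpsw_fifo_shp_on() both address a per-slave 3-bit shaper-enable field in the ptype register, starting at bit 16 and stepped by 3 * slave_num, with one bit per shapeable FIFO (FIFO1..FIFO3). A standalone sketch of the bit selection, not part of the patch:

#include <stdio.h>

#define DEMO_FIFO_SHAPE_EN_SHIFT 16

static unsigned int shaper_bit(int slave_num, int fifo)
{
	unsigned int shift = DEMO_FIFO_SHAPE_EN_SHIFT + 3 * slave_num;

	return (1U << (fifo - 1)) << shift;
}

int main(void)
{
	/* slave 1, FIFO3 -> bit 21 */
	printf("mask = 0x%08x\n", shaper_bit(1, 3));
	return 0;
}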
+
static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct cpsw_priv *priv, bool *link)
{
@@ -1116,6 +1155,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
mac_control |= BIT(4);
*link = true;
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
} else {
mac_control = 0;
/* disable forwarding */
@@ -1577,6 +1622,231 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
soft_reset_slave(slave);
}
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
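
The mapping above pins the last (lowest-priority) traffic class to FIFO0, which cannot be shaped, and hands the remaining classes the shapeable FIFOs counting down from FIFO3. A standalone check of that mapping for num_tc = 3, not part of the patch:

#include <stdio.h>

#define DEMO_FIFO_SHAPERS_NUM 3	/* CPSW_TC_NUM - 1 */

static int tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;
	return DEMO_FIFO_SHAPERS_NUM - tc;
}

int main(void)
{
	int tc, num_tc = 3;

	/* tc0 -> FIFO3, tc1 -> FIFO2, tc2 -> FIFO0 */
	for (tc = 0; tc < num_tc; tc++)
		printf("tc%d -> FIFO%d\n", tc, tc_to_fifo(tc, num_tc));
	return 0;
}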
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+ /* shaping has to stay enabled for the highest fifos linearly
+ * and fifo bw can be no more than the interface allows
+ */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
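
The SEND_PERCENT math above works because bw arrives in kbit/s and shp_cfg_speed in Mbit/s, so DIV_ROUND_UP(bw, speed * 10) yields the FIFO's share of the link in whole percent, rounded up and clamped to at least 1. A standalone worked example, not part of the patch:

#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int bw = 25000;			/* kbit/s requested */
	int shp_cfg_speed = 100;	/* Mbit/s link speed */
	int val = DEMO_DIV_ROUND_UP(bw, shp_cfg_speed * 10);

	if (!val)
		val = 1;
	printf("send percent = %d\n", val);	/* 25 */
	return 0;
}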
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+ /* enable channels in backward order, as the highest FIFOs must be
+ * rate limited first, for compliance with CPDMA rate limited channels,
+ * which are also used in backward order. FIFO0 cannot be rate limited.
+ */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -1;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ /* restore MQPRIO offload */
+ for_each_slave(priv, cpsw_mqprio_resume, priv);
+
+ /* restore CBS offload */
+ for_each_slave(priv, cpsw_cbs_resume, priv);
+}
+
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1656,6 +1926,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
}
+ cpsw_restore(priv);
+
/* Enable Interrupt pacing if configured */
if (cpsw->coal_intvl != 0) {
struct ethtool_coalesce coal;
@@ -2187,6 +2459,78 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return ret;
}
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ if (num_tc) {
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
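
The TX_PRI_MAP word built above gives each of the eight priorities a 4-bit FIFO number, priority i occupying bits 4*i+3..4*i. A standalone sketch with an example prio-to-tc map, not part of the patch (the map values are invented for illustration):

#include <stdio.h>

int main(void)
{
	/* example prio -> tc map for num_tc = 3 */
	int prio_tc_map[8] = { 0, 0, 1, 1, 2, 2, 2, 2 };
	int fifo_of_tc[3] = { 3, 2, 0 };	/* per cpsw_tc_to_fifo() */
	unsigned int tx_prio_map = 0;
	int i;

	for (i = 0; i < 8; i++)
		tx_prio_map |= (unsigned int)fifo_of_tc[prio_tc_map[i]] << (4 * i);

	printf("tx_prio_map = 0x%08x\n", tx_prio_map);	/* 0x00002233 */
	return 0;
}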
+
+static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -2202,6 +2546,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
};
static int cpsw_get_regs_len(struct net_device *ndev)
@@ -2428,7 +2773,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
void (*handler)(void *, int, int);
struct netdev_queue *queue;
struct cpsw_vector *vec;
- int ret, *ch;
+ int ret, *ch, vch;
if (rx) {
ch = &cpsw->rx_ch_num;
@@ -2441,7 +2786,8 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
}
while (*ch < ch_num) {
- vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+ vch = rx ? *ch : 7 - *ch;
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
queue = netdev_get_tx_queue(priv->ndev, *ch);
queue->tx_maxrate = 0;
@@ -2924,7 +3270,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
priv_sl2->mac_addr);
} else {
- random_ether_addr(priv_sl2->mac_addr);
+ eth_random_addr(priv_sl2->mac_addr);
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
@@ -2932,7 +3278,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -2978,7 +3324,7 @@ static int cpsw_probe(struct platform_device *pdev)
u32 slave_offset, sliver_offset, slave_size;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
- int ret = 0, i;
+ int ret = 0, i, ch;
int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
@@ -3153,7 +3499,8 @@ static int cpsw_probe(struct platform_device *pdev)
if (soc)
cpsw->quirk_irq = 1;
- cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
dev_err(priv->dev, "error initializing tx dma channel\n");
ret = PTR_ERR(cpsw->txv[0].ch);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 6f63c8729afc..b96b93c686bf 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
dev_consume_skb_any(skb);
dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
mtype, seqid);
- } else if (time_after(jiffies, skb_cb->tmo)) {
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
dev_dbg(cpts->dev,
"expiring tx timestamp mtype %u seqid %04x\n",
@@ -158,6 +161,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
*/
break;
}
+ /* fall through */
case CPTS_EV_PUSH:
case CPTS_EV_RX:
list_del_init(&event->list);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4f1267477aa4..4236dcdd5634 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -406,37 +406,36 @@ static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
struct cpdma_chan *chan;
u32 old_rate = ch->rate;
u32 new_rmask = 0;
- int rlim = 1;
+ int rlim = 0;
int i;
- *prio_mode = 0;
for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
chan = ctlr->channels[i];
- if (!chan) {
- rlim = 0;
+ if (!chan)
continue;
- }
if (chan == ch)
chan->rate = rate;
if (chan->rate) {
- if (rlim) {
- new_rmask |= chan->mask;
- } else {
- ch->rate = old_rate;
- dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n",
- chan->chan_num);
- return -EINVAL;
- }
- } else {
- *prio_mode = 1;
- rlim = 0;
+ rlim = 1;
+ new_rmask |= chan->mask;
+ continue;
}
+
+ if (rlim)
+ goto err;
}
*rmask = new_rmask;
+ *prio_mode = rlim;
return 0;
+
+err:
+ ch->rate = old_rate;
+ dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
+ chan->chan_num);
+ return -EINVAL;
}
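
The reworked loop enforces a single invariant: rate-limited TX channels must form one contiguous block at the highest channel numbers, so an unlimited channel above a limited one is rejected. A standalone model of that check, not part of the patch:

#include <stdio.h>

static int rates_valid(const unsigned int *rate, int nch)
{
	int i, rlim = 0;

	for (i = 0; i < nch; i++) {
		if (rate[i]) {
			rlim = 1;
			continue;
		}
		if (rlim)
			return 0;	/* unlimited chan above a limited one */
	}
	return 1;
}

int main(void)
{
	unsigned int ok[4] = { 0, 0, 1000, 2000 };
	unsigned int bad[4] = { 0, 1000, 0, 2000 };

	printf("ok:  %s\n", rates_valid(ok, 4) ? "valid" : "invalid");
	printf("bad: %s\n", rates_valid(bad, 4) ? "valid" : "invalid");
	return 0;
}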
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index e40aa3e31af2..a1d335a3c5e4 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
return err;
}
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return 0;
-}
-
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
.ndo_tx_timeout = netcp_ndo_tx_timeout,
- .ndo_select_queue = netcp_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_setup_tc = netcp_setup_tc,
};
@@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
@@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (mac_addr)
ether_addr_copy(ndev->dev_addr, mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index c769cd9d11e7..93d142867c2a 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -966,6 +966,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY: /* get address of MII PHY in use. */
data->phy_id = phy;
+ /* fall through */
case SIOCGMIIREG: /* read MII PHY register. */
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 2a0c06e0f730..42f1f518dad6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -70,7 +70,8 @@
#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
- * in the HW spec */
+ * in the HW spec
+ */
/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
@@ -94,11 +95,11 @@
-#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
+#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
/* Set up to output the remaining data */
align_buffer = 0;
- to_u8_ptr = (u8 *) &align_buffer;
- from_u8_ptr = (u8 *) from_u16_ptr;
+ to_u8_ptr = (u8 *)&align_buffer;
+ from_u8_ptr = (u8 *)from_u16_ptr;
/* Output the remaining data */
for (; length > 0; length--)
@@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u32 align_buffer;
from_u32_ptr = src_ptr;
- to_u16_ptr = (u16 *) dest_ptr;
+ to_u16_ptr = (u16 *)dest_ptr;
for (; length > 3; length -= 4) {
/* Copy each word into the temporary buffer */
@@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u8 *to_u8_ptr, *from_u8_ptr;
/* Set up to read the remaining data */
- to_u8_ptr = (u8 *) to_u16_ptr;
+ to_u8_ptr = (u8 *)to_u16_ptr;
align_buffer = *from_u32_ptr++;
- from_u8_ptr = (u8 *) &align_buffer;
+ from_u8_ptr = (u8 *)&align_buffer;
/* Read the remaining data */
for (; length > 0; length--)
@@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
} else if (drvdata->tx_ping_pong != 0) {
/* If the expected buffer is full, try the other buffer,
- * if it is configured in HW */
+ * if it is configured in HW
+ */
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
return -1; /* Buffer was full, return failure */
/* Write the frame to the buffer */
- xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
@@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
- * has been transmitted */
+ * has been transmitted
+ */
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
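The reflowed comments above describe the EmacLite ping-pong scheme: there are two TX buffers, XOR-ing the buffer address with XEL_BUFFER_OFFSET flips between them, and XEL_TSR_XMIT_ACTIVE_MASK is a software-only flag (not in the HW spec, per the define near the top of the file) that lets the interrupt handler recognize a completed transmit. Condensed, the send path above amounts to:

/* Condensed sketch of xemaclite_send_data() using the driver's own
 * names; the busy check and ping-pong fallback are omitted.
 */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;	/* flip buffers */

xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
xemaclite_writel(byte_count & XEL_TPLR_LENGTH_MASK, addr + XEL_TPLR_OFFSET);

reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);	/* kick TX */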
@@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
+ * @maxlen: Maximum supported ethernet packet length
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
@@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
/* The instance is out of sync, try other buffer if other
* buffer is configured, return 0 otherwise. If the instance is
* out of sync, do not update the 'next_rx_buf_to_use' since it
- * will correct on subsequent calls */
+ * will correct on subsequent calls
+ */
if (drvdata->rx_ping_pong != 0)
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
return 0; /* No data was available */
}
- /* Get the protocol type of the ethernet frame that arrived */
+ /* Get the protocol type of the ethernet frame that arrived
+ */
proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
- * or an IP packet or an ARP packet */
+ * or an IP packet or an ARP packet
+ */
if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
@@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
else
/* Field contains type other than IP or ARP, use max
- * frame size and let user parse it */
+ * frame size and let user parse it
+ */
length = ETH_FRAME_LEN + ETH_FCS_LEN;
} else
/* Use the length in the frame, plus the header and trailer */
@@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = maxlen;
/* Read from the EmacLite device */
- xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
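To summarize the length logic above (the body of the ETH_P_IP branch is elided by the hunk; it reads the total-length field out of the IP header): the Ethernet type/length field does double duty, and the driver decides as follows.

/* Length decision, summarized from the branches above:
 *
 *   proto_type >  ETH_DATA_LEN (1500) -> field is an EtherType:
 *     ETH_P_IP   IP total length + ETH_HLEN + ETH_FCS_LEN
 *     ETH_P_ARP  XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN
 *     other      ETH_FRAME_LEN + ETH_FCS_LEN (user parses the frame)
 *   proto_type <= ETH_DATA_LEN        -> field is the payload length:
 *              proto_type + ETH_HLEN + ETH_FCS_LEN
 *
 * The result is then clamped to maxlen before the aligned read.
 */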
@@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/* Determine the expected Tx buffer address */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
- xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
+ xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN);
xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
@@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/**
* xemaclite_set_mac_address - Set the MAC address for this device
* @dev: Pointer to the network device instance
- * @addr: Void pointer to the sockaddr structure
+ * @address: Void pointer to the sockaddr structure
*
 * This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
@@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
- if (lp->deferred_skb) {
- if (xemaclite_send_data(lp,
- (u8 *) lp->deferred_skb->data,
- lp->deferred_skb->len) != 0)
- return;
- else {
- dev->stats.tx_bytes += lp->deferred_skb->len;
- dev_kfree_skb_irq(lp->deferred_skb);
- lp->deferred_skb = NULL;
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
- }
- }
+
+ if (!lp->deferred_skb)
+ return;
+
+ if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data,
+ lp->deferred_skb->len))
+ return;
+
+ dev->stats.tx_bytes += lp->deferred_skb->len;
+ dev_kfree_skb_irq(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/**
@@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /*
- * A new skb should have the data halfword aligned, but this code is
+ /* A new skb should have the data halfword aligned, but this code is
* here just in case that isn't true. Calculate how many
* bytes we should reserve to get the data to start on a word
- * boundary */
+ * boundary
+ */
align = BUFFER_ALIGN(skb->data);
if (align)
skb_reserve(skb, align);
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
+ len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev)
* @dev_id: Void pointer to the network device instance used as callback
* reference
*
+ * Return: IRQ_HANDLED
+ *
* This function handles the Tx and Rx interrupts of the EmacLite device.
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
@@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp)
unsigned long end = jiffies + 2;
/* wait for the MDIO interface to not be busy or timeout
- after some time.
- */
+ * after some time.
+ */
while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
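The rewrapped comment sits on a standard jiffies poll: end = jiffies + 2 budgets roughly two timer ticks, and time_before_eq() stays correct across jiffies wraparound. The general pattern, with busy() as a hypothetical stand-in for the MDIO status read:

unsigned long end = jiffies + 2;	/* ~2 ticks of budget */

while (busy()) {
	if (time_before_eq(end, jiffies))
		return -ETIMEDOUT;	/* still busy after the budget */
	msleep(1);			/* give the HW time to finish */
}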
@@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ "%s(phy_id=%i, reg=%x) == %x\n", __func__,
phy_id, reg, rc);
return rc;
@@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
*
* This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
+ *
+ * Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
@@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u32 ctrl_reg;
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ "%s(phy_id=%i, reg=%x, val=%x)\n", __func__,
phy_id, reg, val);
if (xemaclite_mdio_wait(lp))
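Both dev_dbg() sites now print the function name via the __func__ identifier instead of a hard-coded string, so the output cannot drift if the function is renamed, e.g.:

dev_dbg(dev, "%s(reg=%x)\n", __func__, reg);	/* expands to the enclosing function's name */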
@@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
- * @ofdev: Pointer to OF device structure
+ * @dev: Pointer to OF device structure
*
* This function enables MDIO bus in the Emaclite device and registers a
* mii_bus.

@@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev)
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
 * It also connects to the phy device, if MDIO is included in the Emaclite device.
+ *
+ * Return: 0 on success. -ENODEV, if PHY cannot be connected.
+ * Non-zero error value on failure.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev)
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
* It also disconnects the phy device associated with the Emaclite device.
+ *
+ * Return: 0, always.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
new_skb = orig_skb;
spin_lock_irqsave(&lp->reset_lock, flags);
- if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
+ if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) {
/* If the Emaclite Tx buffer is busy, stop the Tx queue and
* defer the skb for transmission during the ISR, after the
- * current transmission is complete */
+ * current transmission is complete
+ */
netif_stop_queue(dev);
lp->deferred_skb = new_skb;
/* Take the time stamp now, since we can't do this in an ISR. */
@@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
- if (p) {
- return (bool)*p;
- } else {
- dev_warn(&ofdev->dev, "Parameter %s not found,"
- "defaulting to false\n", s);
+ if (!p) {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s);
return false;
}
+
+ return (bool)*p;
}
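get_bool() reads a u32 cell and collapses it to a bool. A hypothetical modern equivalent would let the OF core do the lookup and the error check; a sketch, assuming the binding stores the flag as a single 0/1 cell exactly as get_bool() expects:

static bool get_bool_alt(struct platform_device *ofdev, const char *s)
{
	u32 val;

	if (of_property_read_u32(ofdev->dev.of_node, s, &val)) {
		dev_warn(&ofdev->dev,
			 "Parameter %s not found, defaulting to false\n", s);
		return false;
	}
	return !!val;
}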
static const struct net_device_ops xemaclite_netdev_ops;
@@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
* @ofdev: Pointer to OF device structure
- * @match: Pointer to the structure used for matching a device
*
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC